/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Version:	$Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages.  Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller
 *					and the PEEK bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option,
 *		Alexey Kuznetsov:		which allows both IPv4 and IPv6 sockets
 *						to bind a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>:	Add Encapsulation Support
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/config.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/inet_common.h>
#include <net/checksum.h>
#include <net/xfrm.h>
/*
 *	Snmp MIB for the UDP layer
 */
DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);

struct hlist_head udp_hash[UDP_HTABLE_SIZE];
rwlock_t udp_hash_lock = RW_LOCK_UNLOCKED;

/* Shared by v4/v6 udp. */
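/*
 * Port selection, roughly: binding with port 0 lets udp_v4_get_port() below
 * auto-select a free local port, scanning the hash buckets starting from
 * udp_port_rover within sysctl_local_port_range.  A minimal userspace sketch
 * of the path (standard BSD socket calls assumed, error handling omitted):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = 0 };
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * after which getsockname() reports the port the kernel picked.
 */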
static int udp_v4_get_port(struct sock *sk, unsigned short snum)
	struct hlist_node *node;
	struct inet_opt *inet = inet_sk(sk);

	write_lock_bh(&udp_hash_lock);
		int best_size_so_far, best, result, i;

		if (udp_port_rover > sysctl_local_port_range[1] ||
		    udp_port_rover < sysctl_local_port_range[0])
			udp_port_rover = sysctl_local_port_range[0];
		best_size_so_far = 32767;
		best = result = udp_port_rover;
		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
			struct hlist_head *list;

			list = &udp_hash[result & (UDP_HTABLE_SIZE - 1)];
			if (hlist_empty(list)) {
				if (result > sysctl_local_port_range[1])
					result = sysctl_local_port_range[0] +
						((result - sysctl_local_port_range[0]) &
						 (UDP_HTABLE_SIZE - 1));
			sk_for_each(sk2, node, list)
				if (++size >= best_size_so_far)
			best_size_so_far = size;
		for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) {
			if (result > sysctl_local_port_range[1])
				result = sysctl_local_port_range[0]
					+ ((result - sysctl_local_port_range[0]) &
					   (UDP_HTABLE_SIZE - 1));
			if (!udp_lport_inuse(result))
		if (i >= (1 << 16) / UDP_HTABLE_SIZE)
		udp_port_rover = snum = result;
	sk_for_each(sk2, node,
		    &udp_hash[snum & (UDP_HTABLE_SIZE - 1)]) {
		struct inet_opt *inet2 = inet_sk(sk2);

		if (inet2->num == snum &&
		    !ipv6_only_sock(sk2) &&
		    (!sk2->sk_bound_dev_if ||
		     !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (!inet2->rcv_saddr ||
		     inet2->rcv_saddr == inet->rcv_saddr) &&
		    (!sk2->sk_reuse || !sk->sk_reuse))
	if (sk_unhashed(sk)) {
		struct hlist_head *h = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];

		sock_prot_inc_use(sk->sk_prot);
	write_unlock_bh(&udp_hash_lock);
	write_unlock_bh(&udp_hash_lock);

static void udp_v4_hash(struct sock *sk)

static void udp_v4_unhash(struct sock *sk)
	write_lock_bh(&udp_hash_lock);
	if (sk_del_node_init(sk)) {
		inet_sk(sk)->num = 0;
		sock_prot_dec_use(sk->sk_prot);
	write_unlock_bh(&udp_hash_lock);
/* UDP is nearly always wildcarded out the wazoo, so it makes no sense to try
 * harder than this. -DaveM
 */
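/*
 * Lookup scoring, roughly: every socket in the destination-port bucket is
 * compared with the packet, and a socket scores higher for each attribute it
 * matches exactly (bound local address, connected remote address and port,
 * bound device).  The highest score wins, so for example a socket connected
 * to the packet's source beats a plain wildcard listener bound only to the
 * same local port.  The exact increments matter less than the ordering they
 * impose.
 */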
struct sock *udp_v4_lookup_longway(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif)
	struct sock *sk, *result = NULL;
	struct hlist_node *node;
	unsigned short hnum = ntohs(dport);

	sk_for_each(sk, node, &udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]) {
		struct inet_opt *inet = inet_sk(sk);

		if (inet->num == hnum && !ipv6_only_sock(sk)) {
			int score = (sk->sk_family == PF_INET ? 1 : 0);
			if (inet->rcv_saddr) {
				if (inet->rcv_saddr != daddr)
				if (inet->daddr != saddr)
				if (inet->dport != sport)
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
			} else if (score > badness) {

__inline__ struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif)
	read_lock(&udp_hash_lock);
	sk = udp_v4_lookup_longway(saddr, sport, daddr, dport, dif);
	read_unlock(&udp_hash_lock);

static inline struct sock *udp_v4_mcast_next(struct sock *sk,
					     u16 loc_port, u32 loc_addr,
					     u16 rmt_port, u32 rmt_addr,
	struct hlist_node *node;
	unsigned short hnum = ntohs(loc_port);

	sk_for_each_from(s, node) {
		struct inet_opt *inet = inet_sk(s);

		if (inet->num != hnum ||
		    (inet->daddr && inet->daddr != rmt_addr) ||
		    (inet->dport != rmt_port && inet->dport) ||
		    (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
		if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet.  We move
 * on past this.  Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */
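/*
 * From the application side this surfaces in two ways (a sketch, standard
 * socket API assumed): a connected UDP socket that triggered an ICMP port
 * unreachable sees the error on a subsequent call, e.g.
 *
 *	connect(fd, ...); send(fd, buf, n, 0);
 *	if (recv(fd, buf, n, 0) < 0 && errno == ECONNREFUSED)
 *		the peer port was closed;
 *
 * while a socket with the IP_RECVERR option set can instead read the queued
 * ICMP details with recvmsg(fd, &msg, MSG_ERRQUEUE) (see the ip_icmp_error()
 * call below).
 */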
void udp_err(struct sk_buff *skb, u32 info)
	struct inet_opt *inet;
	struct iphdr *iph = (struct iphdr*)skb->data;
	struct udphdr *uh = (struct udphdr*)(skb->data + (iph->ihl << 2));
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;

	sk = udp_v4_lookup(iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex);
		ICMP_INC_STATS_BH(IcmpInErrors);
		return;	/* No socket for error */
	case ICMP_TIME_EXCEEDED:
	case ICMP_SOURCE_QUENCH:
	case ICMP_PARAMETERPROB:
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;

	/*
	 * RFC1122: OK.  Passes ICMP errors back to application, as per
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1));
	sk->sk_error_report(sk);
/*
 * Throw away all pending data and cancel the corking.  Socket is locked.
 */
static void udp_flush_pending_frames(struct sock *sk)
	struct udp_opt *up = udp_sk(sk);

		ip_flush_pending_frames(sk);
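/*
 * Corking in practice (a sketch, standard socket API assumed): an
 * application can accumulate several writes into one datagram either with
 * MSG_MORE or with the UDP_CORK socket option, and the final push happens in
 * udp_push_pending_frames() below:
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, part1, len1, 0);
 *	send(fd, part2, len2, 0);
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
 *
 * and exactly one datagram goes out when the cork is released.
 */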
/*
 * Push out all pending data as one UDP datagram.  Socket is locked.
 */
static int udp_push_pending_frames(struct sock *sk, struct udp_opt *up)
	struct inet_opt *inet = inet_sk(sk);
	struct flowi *fl = &inet->cork.fl;

	/* Grab the skbuff where UDP header space exists. */
	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)

	/*
	 * Create a UDP header
	 */
	uh->source = fl->fl_ip_sport;
	uh->dest = fl->fl_ip_dport;
	uh->len = htons(up->len);

	if (sk->sk_no_check == UDP_CSUM_NOXMIT) {
		skb->ip_summed = CHECKSUM_NONE;
	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		if (skb->ip_summed == CHECKSUM_HW) {
			skb->csum = offsetof(struct udphdr, check);
			uh->check = ~csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst,
						       up->len, IPPROTO_UDP, 0);
			skb->csum = csum_partial((char *)uh,
						 sizeof(struct udphdr), skb->csum);
			uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst,
						      up->len, IPPROTO_UDP, skb->csum);
		unsigned int csum = 0;
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together.
		 */
		if (skb->ip_summed == CHECKSUM_HW) {
			int offset = (unsigned char *)uh - skb->data;
			skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = csum_partial((char *)uh,
						 sizeof(struct udphdr), skb->csum);
		skb_queue_walk(&sk->sk_write_queue, skb) {
			csum = csum_add(csum, skb->csum);
		uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst,
					      up->len, IPPROTO_UDP, csum);
	err = ip_push_pending_frames(sk);
static unsigned short udp_check(struct udphdr *uh, int len, unsigned long saddr, unsigned long daddr, unsigned long base)
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
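/*
 * The UDP checksum is the standard Internet checksum (RFC 768/1071) computed
 * over a pseudo-header plus the UDP header and payload.  What
 * csum_tcpudp_magic() does above, roughly:
 *
 *	sum  = one's-complement sum of the UDP header and payload (base)
 *	sum += saddr, daddr, IPPROTO_UDP and the UDP length (pseudo-header)
 *	check = ~fold_to_16bit(sum)
 *
 * A computed value of 0 is transmitted as 0xffff, since 0 on the wire means
 * "no checksum was generated".
 */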
int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
	struct inet_opt *inet = inet_sk(sk);
	struct udp_opt *up = udp_sk(sk);
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	u32 daddr, faddr, saddr;
	int corkreq = up->corkflag || msg->msg_flags & MSG_MORE;

	if (msg->msg_flags & MSG_OOB)	/* Mirror BSD error message compatibility */

	/*
	 * There are pending frames.
	 * The socket lock must be held while it's corked.
	 */
	if (likely(up->pending)) {
		if (unlikely(up->pending != AF_INET)) {
	ulen += sizeof(struct udphdr);

	/*
	 * Get and verify the address.
	 */
		struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
		if (msg->msg_namelen < sizeof(*usin))
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		/* Open fast path for connected socket.
		 * The route will not be used if at least one option is set.
		 */
	ipc.addr = inet->saddr;

	ipc.oif = sk->sk_bound_dev_if;
	if (msg->msg_controllen) {
		err = ip_cmsg_send(msg, &ipc);
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->srr) {
		faddr = ipc.opt->faddr;
	tos = RT_TOS(inet->tos);
	if (sk->sk_localroute || (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->is_strictroute)) {

	if (MULTICAST(daddr)) {
			ipc.oif = inet->mc_index;
			saddr = inet->mc_addr;

	rt = (struct rtable *)sk_dst_check(sk, 0);

		struct flowi fl = { .oif = ipc.oif,
				    .proto = IPPROTO_UDP,
				    { .sport = inet->sport,
				      .dport = dport } } };
		err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags & MSG_DONTWAIT));
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
		sk_dst_set(sk, dst_clone(&rt->u.dst));

	if (msg->msg_flags & MSG_CONFIRM)
	daddr = ipc.addr = rt->rt_dst;

	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp cork app bug 2\n"));

	/*
	 * Now cork the socket to pend data.
	 */
	inet->cork.fl.fl4_dst = daddr;
	inet->cork.fl.fl_ip_dport = dport;
	inet->cork.fl.fl4_src = saddr;
	inet->cork.fl.fl_ip_sport = inet->sport;
	up->pending = AF_INET;

	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen,
			     sizeof(struct udphdr), &ipc, rt,
			     corkreq ? msg->msg_flags | MSG_MORE : msg->msg_flags);
		udp_flush_pending_frames(sk);
		err = udp_push_pending_frames(sk, up);
	UDP_INC_STATS_USER(UdpOutDatagrams);

	dst_confirm(&rt->u.dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags)
	struct udp_opt *up = udp_sk(sk);
	struct msghdr msg = { .msg_flags = flags | MSG_MORE };

	/* Call udp_sendmsg() to specify the destination address, which the
	 * sendpage interface can't pass.
	 * This will succeed only when the socket is connected.
	 */
	ret = udp_sendmsg(NULL, sk, &msg, 0);

	if (unlikely(!up->pending)) {
		NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp cork app bug 3\n"));

	ret = ip_append_page(sk, page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		return sock_no_sendpage(sk->sk_socket, page, offset,
		udp_flush_pending_frames(sk);

	if (!(up->corkflag || (flags & MSG_MORE)))
		ret = udp_push_pending_frames(sk, up);
/*
 *	IOCTL requests applicable to the UDP protocol
 */
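/*
 * From userspace (a sketch, standard ioctl interface assumed): SIOCOUTQ
 * reports the bytes still queued for transmit and SIOCINQ the payload size
 * of the next datagram waiting to be read, matching the two cases handled
 * in udp_ioctl() below:
 *
 *	int pending;
 *	ioctl(fd, SIOCOUTQ, &pending);	bytes not yet pushed to the device
 *	ioctl(fd, SIOCINQ, &pending);	payload of the next datagram, 0 if none
 */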
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
			int amount = atomic_read(&sk->sk_wmem_alloc);
			return put_user(amount, (int __user *)arg);
			unsigned long amount;

			spin_lock_irq(&sk->sk_receive_queue.lock);
			skb = skb_peek(&sk->sk_receive_queue);
				/*
				 * We will only return the amount
				 * of this packet since that is all
				 * that will be read.
				 */
				amount = skb->len - sizeof(struct udphdr);
			spin_unlock_irq(&sk->sk_receive_queue.lock);
			return put_user(amount, (int __user *)arg);

static __inline__ int __udp_checksum_complete(struct sk_buff *skb)
	return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));

static __inline__ int udp_checksum_complete(struct sk_buff *skb)
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__udp_checksum_complete(skb);
/*
 * 	This should be easy, if there is something there we
 * 	return it, otherwise we block.
 */
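/*
 * Receive-side behaviour, roughly: the payload is copied out minus the UDP
 * header, and if the caller's buffer is smaller than the datagram the copy
 * is truncated and MSG_TRUNC is reported back.  A sketch of how an
 * application would detect that (standard recvmsg() interface assumed):
 *
 *	struct msghdr msg = { the iov points at buf };
 *	ssize_t n = recvmsg(fd, &msg, 0);
 *	if (msg.msg_flags & MSG_TRUNC)
 *		the datagram was larger than buf and its tail was discarded.
 */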
int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int noblock, int flags, int *addr_len)
	struct inet_opt *inet = inet_sk(sk);
	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;

	/*
	 *	Check any passed addresses
	 */
		*addr_len = sizeof(*sin);

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);

	copied = skb->len - sizeof(struct udphdr);
		msg->msg_flags |= MSG_TRUNC;

	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
	} else if (msg->msg_flags & MSG_TRUNC) {
		if (__udp_checksum_complete(skb))
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
		sin->sin_family = AF_INET;
		sin->sin_port = skb->h.uh->source;
		sin->sin_addr.s_addr = skb->nh.iph->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);

	skb_free_datagram(sk, skb);

	UDP_INC_STATS_BH(UdpInErrors);

	if (flags & MSG_PEEK) {
		spin_lock_irq(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
		spin_unlock_irq(&sk->sk_receive_queue.lock);

	skb_free_datagram(sk, skb);

int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
	struct inet_opt *inet = inet_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;

	if (addr_len < sizeof(*usin))
	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	oif = sk->sk_bound_dev_if;
	if (MULTICAST(usin->sin_addr.s_addr)) {
			oif = inet->mc_index;
			saddr = inet->mc_addr;
	err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr,
			       RT_CONN_FLAGS(sk), oif,
			       inet->sport, usin->sin_port, sk);
	if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
	inet->saddr = rt->rt_src;	/* Update source address */
	if (!inet->rcv_saddr)
		inet->rcv_saddr = rt->rt_src;
	inet->daddr = rt->rt_dst;
	inet->dport = usin->sin_port;
	sk->sk_state = TCP_ESTABLISHED;
	sk_dst_set(sk, &rt->u.dst);

int udp_disconnect(struct sock *sk, int flags)
	struct inet_opt *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */
	sk->sk_state = TCP_CLOSE;
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);

static void udp_close(struct sock *sk, long timeout)
	inet_sock_release(sk);
/*
 * Returns:
 *	 1  if the UDP system should process it
 *	 0  if we should drop this packet
 *	-1  if it should get processed by xfrm4_rcv_encap
 */
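/*
 * Wire formats handled below (a sketch, following the ESP-in-UDP
 * encapsulation drafts this code implements):
 *
 *   NAT-T keepalive:            UDP header | 0xff              (1 byte, eaten)
 *   UDP_ENCAP_ESPINUDP:         UDP header | ESP (SPI != 0)    (strip 8 bytes)
 *   UDP_ENCAP_ESPINUDP_NON_IKE: UDP header | 0x00000000 0x00000000 | ESP
 *                                                              (strip 16 bytes)
 *
 * Anything else (e.g. the would-be SPI field reads 0) is treated as an IKE
 * packet and passed up to the UDP socket unchanged.
 */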
static int udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
	struct udp_opt *up = udp_sk(sk);
	struct udphdr *uh = skb->h.uh;
	__u8 *udpdata = (__u8 *)uh + sizeof(struct udphdr);
	__u32 *udpdata32 = (__u32 *)udpdata;
	__u16 encap_type = up->encap_type;

	/* if we're overly short, let UDP handle it */
	if (udpdata > skb->tail)

	/* if this is not an encapsulation socket, then just return now */

	len = skb->tail - udpdata;

	switch (encap_type) {
	case UDP_ENCAP_ESPINUDP:
		/* Check if this is a keepalive packet.  If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
		} else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
			/* ESP Packet without Non-ESP header */
			len = sizeof(struct udphdr);
			/* Must be an IKE packet.. pass it through */

		/* At this point we are sure that this is an ESPinUDP packet,
		 * so we need to remove 'len' bytes from the packet (the UDP
		 * header and optional ESP marker bytes) and then modify the
		 * protocol to ESP, and then call into the transform receiver.
		 */

		/* Now we can update and verify the packet length... */
		iphlen = iph->ihl << 2;
		iph->tot_len = htons(ntohs(iph->tot_len) - len);
		if (skb->len < iphlen + len) {
			/* packet is too small!?! */

		/* pull the data buffer up to the ESP header and set the
		 * transport header to point to ESP.  Keep UDP on the stack
		 */
		skb->h.raw = skb_pull(skb, len);

		/* modify the protocol (it's ESP!) */
		iph->protocol = IPPROTO_ESP;

		/* and let the caller know to send this into the ESP processor... */

	case UDP_ENCAP_ESPINUDP_NON_IKE:
		/* Check if this is a keepalive packet.  If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
		} else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
			   udpdata32[0] == 0 && udpdata32[1] == 0) {
			/* ESP Packet with Non-IKE marker */
			len = sizeof(struct udphdr) + 2 * sizeof(u32);
			/* Must be an IKE packet.. pass it through */

		if (net_ratelimit())
			printk(KERN_INFO "udp_encap_rcv(): Unhandled UDP encap type: %u\n",
/*
 *	>0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
	struct udp_opt *up = udp_sk(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {

	if (up->encap_type) {
		/*
		 * This is an encapsulation socket, so let's see if this is
		 * an encapsulated packet.
		 * If it's a keepalive packet, then just eat it.
		 * If it's an encapsulated packet, then pass it to the
		 * IPsec xfrm input and return the response
		 * appropriately.  Otherwise, just fall through and
		 * pass this up the UDP socket.
		 */
		ret = udp_encap_rcv(sk, skb);
			/* Eat the packet .. */
			/* process the ESP packet */
			ret = xfrm4_rcv_encap(skb, up->encap_type);
			UDP_INC_STATS_BH(UdpInDatagrams);
		/* FALLTHROUGH -- it's a UDP Packet */

	if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
		if (__udp_checksum_complete(skb)) {
			UDP_INC_STATS_BH(UdpInErrors);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (sock_queue_rcv_skb(sk, skb) < 0) {
		UDP_INC_STATS_BH(UdpInErrors);
	UDP_INC_STATS_BH(UdpInDatagrams);
/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context,
 *	so we don't need to lock the hashes.
 */
static int udp_v4_mcast_deliver(struct sk_buff *skb, struct udphdr *uh,
				u32 saddr, u32 daddr)
	read_lock(&udp_hash_lock);
	sk = sk_head(&udp_hash[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
	dif = skb->dev->ifindex;
	sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
		struct sock *sknext = NULL;
		struct sk_buff *skb1 = skb;

		sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr,
					   uh->source, saddr, dif);
			skb1 = skb_clone(skb, GFP_ATOMIC);
			int ret = udp_queue_rcv_skb(sk, skb1);
				/* we should probably re-process instead
				 * of dropping packets here. */
	read_unlock(&udp_hash_lock);
/* Initialize the UDP checksum.  If we exit with zero (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the udp header, and folding it into skb->csum.
 */
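/*
 * The three cases below, in short:
 *
 *	if (uh->check == 0)		the sender sent no checksum: accept as-is
 *	else if (hardware checksummed)	verify now against the pseudo-header,
 *					falling back to software on mismatch
 *	else				seed skb->csum with the pseudo-header sum
 *					so a later skb_checksum() over the payload
 *					can finish the verification lazily
 */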
static int udp_checksum_init(struct sk_buff *skb, struct udphdr *uh,
			     unsigned short ulen, u32 saddr, u32 daddr)
	if (uh->check == 0) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (skb->ip_summed == CHECKSUM_HW) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		if (!udp_check(uh, ulen, saddr, daddr, skb->csum))
		NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "udp v4 hw csum failure.\n"));
		skb->ip_summed = CHECKSUM_NONE;
	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
		skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
	/* Probably, we should checksum udp header (it should be in cache
	 * in any case) and data in tiny packets (< rx copybreak).
	 */
/*
 *	All we need to do is get the socket, and then do a checksum.
 */
int udp_rcv(struct sk_buff *skb)
	unsigned short ulen;
	struct rtable *rt = (struct rtable *)skb->dst;
	u32 saddr = skb->nh.iph->saddr;
	u32 daddr = skb->nh.iph->daddr;

	/*
	 *	Validate the packet and the UDP length.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))

	ulen = ntohs(uh->len);
	if (ulen > len || ulen < sizeof(*uh))
	if (pskb_trim(skb, ulen))

	if (udp_checksum_init(skb, uh, ulen, saddr, daddr) < 0)

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return udp_v4_mcast_deliver(skb, uh, saddr, daddr);

	sk = udp_v4_lookup(saddr, uh->source, daddr, uh->dest, skb->dev->ifindex);
		int ret = udp_queue_rcv_skb(sk, skb);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))

	/* No socket.  Drop packet silently, if checksum is wrong */
	if (udp_checksum_complete(skb))

	UDP_INC_STATS_BH(UdpNoPorts);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't wanna listen.  Ignore it.
	 */
	NETDEBUG(if (net_ratelimit())
		 printk(KERN_DEBUG "UDP: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
	UDP_INC_STATS_BH(UdpInErrors);

	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	NETDEBUG(if (net_ratelimit())
		 printk(KERN_DEBUG "UDP: bad checksum.  From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
	UDP_INC_STATS_BH(UdpInErrors);
static int udp_destroy_sock(struct sock *sk)
	udp_flush_pending_frames(sk);
/*
 *	Socket option code for UDP
 */
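/*
 * A sketch of the userspace side (option names as handled below; an IKE
 * daemon would typically do this on its NAT-T socket):
 *
 *	int type = UDP_ENCAP_ESPINUDP;
 *	setsockopt(fd, SOL_UDP, UDP_ENCAP, &type, sizeof(type));
 *
 * after which udp_encap_rcv() above starts treating ESP-in-UDP payloads on
 * this socket specially.  UDP_CORK is handled the same way in the switch
 * just below.
 */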
static int udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
	struct udp_opt *up = udp_sk(sk);

	if (level != SOL_UDP)
		return ip_setsockopt(sk, level, optname, optval, optlen);

	if (optlen < sizeof(int))
	if (get_user(val, (int __user *)optval))
			udp_push_pending_frames(sk, up);
		up->encap_type = val;

static int udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
	struct udp_opt *up = udp_sk(sk);

	if (level != SOL_UDP)
		return ip_getsockopt(sk, level, optname, optval, optlen);

	if (get_user(len, optlen))
	len = min_t(unsigned int, len, sizeof(int));
		val = up->encap_type;
		return -ENOPROTOOPT;
	if (put_user(len, optlen))
	if (copy_to_user(optval, &val, len))

struct proto udp_prot = {
	.connect	= udp_connect,
	.disconnect	= udp_disconnect,
	.destroy	= udp_destroy_sock,
	.setsockopt	= udp_setsockopt,
	.getsockopt	= udp_getsockopt,
	.sendmsg	= udp_sendmsg,
	.recvmsg	= udp_recvmsg,
	.sendpage	= udp_sendpage,
	.backlog_rcv	= udp_queue_rcv_skb,
	.hash		= udp_v4_hash,
	.unhash		= udp_v4_unhash,
	.get_port	= udp_v4_get_port,
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq)
	struct udp_iter_state *state = seq->private;

	for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
		struct hlist_node *node;
		sk_for_each(sk, node, &udp_hash[state->bucket]) {
			if (sk->sk_family == state->family)

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
	struct udp_iter_state *state = seq->private;

	} while (sk && sk->sk_family != state->family);

	if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
		sk = sk_head(&udp_hash[state->bucket]);

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
	struct sock *sk = udp_get_first(seq);

	while (pos && (sk = udp_get_next(seq, sk)) != NULL)
	return pos ? NULL : sk;

static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
	read_lock(&udp_hash_lock);
	return *pos ? udp_get_idx(seq, *pos - 1) : (void *)1;

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
		sk = udp_get_idx(seq, 0);
		sk = udp_get_next(seq, v);

static void udp_seq_stop(struct seq_file *seq, void *v)
	read_unlock(&udp_hash_lock);

static int udp_seq_open(struct inode *inode, struct file *file)
	struct udp_seq_afinfo *afinfo = PDE(inode)->data;
	struct seq_file *seq;
	struct udp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);

	memset(s, 0, sizeof(*s));
	s->family		= afinfo->family;
	s->seq_ops.start	= udp_seq_start;
	s->seq_ops.next		= udp_seq_next;
	s->seq_ops.show		= afinfo->seq_show;
	s->seq_ops.stop		= udp_seq_stop;

	rc = seq_open(file, &s->seq_ops);

	seq = file->private_data;
/* ------------------------------------------------------------------------ */
int udp_proc_register(struct udp_seq_afinfo *afinfo)
	struct proc_dir_entry *p;

	afinfo->seq_fops->owner		= afinfo->owner;
	afinfo->seq_fops->open		= udp_seq_open;
	afinfo->seq_fops->read		= seq_read;
	afinfo->seq_fops->llseek	= seq_lseek;
	afinfo->seq_fops->release	= seq_release_private;
	p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);

void udp_proc_unregister(struct udp_seq_afinfo *afinfo)
	proc_net_remove(afinfo->name);
	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
	struct inet_opt *inet = inet_sk(sp);
	unsigned int dest = inet->daddr;
	unsigned int src  = inet->rcv_saddr;
	__u16 destp	  = ntohs(inet->dport);
	__u16 srcp	  = ntohs(inet->sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
		bucket, src, srcp, dest, destp, sp->sk_state,
		atomic_read(&sp->sk_wmem_alloc),
		atomic_read(&sp->sk_rmem_alloc),
		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp);
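/*
 * Together with the header emitted in udp4_seq_show() below, each socket
 * therefore appears in /proc/net/udp as one line roughly like the following
 * (values are illustrative only; addresses and ports are in hex):
 *
 *  sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
 *   0: 00000000:0035 00000000:0000 07 00000000:00000000 00:00000000 00000000 0 0 1234 2 c0ffee00
 */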
static int udp4_seq_show(struct seq_file *seq, void *v)
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   " sl local_address rem_address st tx_queue "
			   "rx_queue tr tm->when retrnsmt uid timeout "
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, tmpbuf, state->bucket);
		seq_printf(seq, "%-127s\n", tmpbuf);

/* ------------------------------------------------------------------------ */
static struct file_operations udp4_seq_fops;
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.owner		= THIS_MODULE,
	.seq_show	= udp4_seq_show,
	.seq_fops	= &udp4_seq_fops,

int __init udp4_proc_init(void)
	return udp_proc_register(&udp4_seq_afinfo);

void udp4_proc_exit(void)
	udp_proc_unregister(&udp4_seq_afinfo);
#endif /* CONFIG_PROC_FS */
EXPORT_SYMBOL(udp_connect);
EXPORT_SYMBOL(udp_disconnect);
EXPORT_SYMBOL(udp_hash);
EXPORT_SYMBOL(udp_hash_lock);
EXPORT_SYMBOL(udp_ioctl);
EXPORT_SYMBOL(udp_port_rover);
EXPORT_SYMBOL(udp_prot);
EXPORT_SYMBOL(udp_sendmsg);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(udp_proc_register);
EXPORT_SYMBOL(udp_proc_unregister);