/*
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before, but buried in udp.c, so I moved it.
 *
 *	Authors:	Alan Cox <alan@redhat.com>. (datagram_poll() from old
 *			udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood.
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non-IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms.
 *		Pete Wyckoff	:	Unconnected accept() fix.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>

#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/checksum.h>
/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
	int error;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem.
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk->sk_sleep, &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
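
/*
 * Editor's sketch (not in the original file): the generic wait-queue idiom
 * that wait_for_packet() follows. The condition is re-checked only after
 * prepare_to_wait_exclusive() has put the task on sk->sk_sleep, so a wakeup
 * arriving between the check and schedule_timeout() is not lost -- it merely
 * leaves the task runnable and the sleep returns at once. "data_ready_cond"
 * is a hypothetical predicate:
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 *	if (!data_ready_cond(sk))
 *		timeo = schedule_timeout(timeo);
 *	finish_wait(sk->sk_sleep, &wait);
 */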
/**
 *	skb_recv_datagram - Receive a datagram skbuff
 *	@sk - socket
 *	@flags - MSG_ flags
 *	@noblock - blocking operation?
 *	@err - error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and AppleTalk layers. It also finally
 *	fixes the long standing peek and read race for datagram sockets. If
 *	you alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram).
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*				--ANK (980729)
 *
 *	The order of the tests when we find no data waiting is specified
 *	quite explicitly by POSIX 1003.1g; don't change them without having
 *	the standard around please.
 */
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
				  int noblock, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, noblock);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		if (flags & MSG_PEEK) {
			unsigned long cpu_flags;

			spin_lock_irqsave(&sk->sk_receive_queue.lock,
					  cpu_flags);
			skb = skb_peek(&sk->sk_receive_queue);
			if (skb)	/* skb_peek() returns NULL on an empty queue */
				atomic_inc(&skb->users);
			spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
					       cpu_flags);
		} else
			skb = skb_dequeue(&sk->sk_receive_queue);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
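
/*
 * Editor's sketch (not in the original file): the shape of a protocol
 * recvmsg() built on skb_recv_datagram(), in the style of udp.c and
 * friends. The function name myproto_recvmsg is hypothetical:
 *
 *	static int myproto_recvmsg(struct kiocb *iocb, struct socket *sock,
 *				   struct msghdr *msg, size_t len, int flags)
 *	{
 *		struct sock *sk = sock->sk;
 *		struct sk_buff *skb;
 *		int err, copied;
 *
 *		skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
 *		if (!skb)
 *			return err;
 *
 *		copied = skb->len;
 *		if (copied > len) {
 *			copied = len;
 *			msg->msg_flags |= MSG_TRUNC;
 *		}
 *		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 *		skb_free_datagram(sk, skb);
 *		return err ? err : copied;
 *	}
 */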
void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
}
/*
 *	Copy a datagram to a linear buffer.
 */
int skb_copy_datagram(const struct sk_buff *skb, int offset, char __user *to, int size)
{
	struct iovec iov = { .iov_base = to, .iov_len = size };

	return skb_copy_datagram_iovec(skb, offset, &iov, size);
}
/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb - buffer to copy
 *	@offset - offset in the buffer to start copying from
 *	@iovec - io vector to copy to
 *	@len - amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_datagram_iovec(list,
							    offset - start,
							    to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
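
/*
 * Editor's note (not in the original file): the walk above visits, in order,
 * the three places an skb can keep data: the linear header (skb_headlen),
 * the paged frags array, and the chained frag_list. A sketch that sums the
 * same three areas (skb_area_len is a hypothetical helper; for a well-formed
 * skb the total equals skb->len):
 *
 *	static int skb_area_len(const struct sk_buff *skb)
 *	{
 *		const struct sk_buff *list;
 *		int i, len = skb_headlen(skb);
 *
 *		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 *			len += skb_shinfo(skb)->frags[i].size;
 *		for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
 *			len += list->len;
 *		return len;
 *	}
 */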
int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
			       u8 __user *to, int len, unsigned int *csump)
{
	int start = skb_headlen(skb);
	int pos = 0;
	int i, copy = start - offset;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			unsigned int csum2;
			int err = 0;
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
						      frag->page_offset +
						      offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				unsigned int csum2 = 0;
				if (copy > len)
					copy = len;
				if (skb_copy_and_csum_datagram(list,
							       offset - start,
							       to, copy,
							       &csum2))
					goto fault;
				*csump = csum_block_add(*csump, csum2, pos);
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
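
/*
 * Editor's note (not in the original file): the Internet checksum is
 * position-sensitive only to even/odd byte alignment, which is why a
 * sub-checksum csum2 computed over a block starting at byte offset pos can
 * be folded into the running sum with csum_block_add(*csump, csum2, pos):
 * the helper byte-swaps csum2 first when pos is odd. Worked example over
 * the bytes "abcd":
 *
 *	csum("abcd") == csum_block_add(csum("ab"), csum("cd"), 2)
 *	csum("abcd") == csum_block_add(csum("abc"), csum("d"), 3)
 *
 * In the second line "d" starts at the odd offset 3, so csum("d") is
 * byte-swapped before being added. This is why the code above threads pos
 * through the header, frag and frag_list passes.
 */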
/**
 *	skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 *	@skb - skbuff
 *	@hlen - hardware length
 *	@iovec - io vector
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy. Beware, in this case iovec
 *			   can be modified!
 */
int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb,
				     int hlen, struct iovec *iov)
{
	unsigned int csum;
	int chunk = skb->len - hlen;

	/* Skip filled elements.
	 * Pretty silly, look at memcpy_toiovec, though 8)
	 */
	while (!iov->iov_len)
		iov++;

	if (iov->iov_len < chunk) {
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, chunk + hlen,
							   skb->csum)))
			goto csum_error;
		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
					       chunk, &csum))
			goto fault;
		if ((unsigned short)csum_fold(csum))
			goto csum_error;
		iov->iov_len -= chunk;
		iov->iov_base += chunk;
	}
	return 0;
csum_error:
	return -EINVAL;
fault:
	return -EFAULT;
}
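
/*
 * Editor's sketch (not in the original file): how a UDP-style receive path
 * typically calls the helper above, verifying the checksum while copying
 * and skipping the transport header via hlen. The labels csum_copy_err and
 * out_free are hypothetical:
 *
 *	err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr),
 *					       msg->msg_iov);
 *	if (err == -EINVAL)
 *		goto csum_copy_err;
 *	if (err)
 *		goto out_free;
 */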
/**
 *	datagram_poll - generic datagram poll
 *	@file - file struct
 *	@sock - socket
 *	@wait - poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you _don't_ use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	poll_wait(file, sk->sk_sleep, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
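
/*
 * Editor's sketch (not in the original file): datagram_poll() is meant to be
 * plugged straight into a protocol's proto_ops table; only protocols with a
 * different write policy than sock_writeable() need their own poll. The
 * PF_MYPROTO family and myproto_* operations are hypothetical placeholders:
 *
 *	static struct proto_ops myproto_ops = {
 *		.family		= PF_MYPROTO,
 *		.poll		= datagram_poll,
 *		.recvmsg	= myproto_recvmsg,
 *	};
 */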
EXPORT_SYMBOL(datagram_poll);
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
EXPORT_SYMBOL(skb_copy_datagram);
EXPORT_SYMBOL(skb_copy_datagram_iovec);
EXPORT_SYMBOL(skb_free_datagram);
EXPORT_SYMBOL(skb_recv_datagram);