2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * PACKET - implements raw packet sockets.
8 * Version: $Id: af_packet.c,v 1.61 2002/02/08 03:57:19 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Alan Cox, <gw4pts@gw4pts.ampr.org>
15 * Alan Cox : verify_area() now used correctly
16 * Alan Cox : new skbuff lists, look ma no backlogs!
17 * Alan Cox : tidied skbuff lists.
18 * Alan Cox : Now uses generic datagram routines I
19 * added. Also fixed the peek/read crash
20 * from all old Linux datagram code.
21 * Alan Cox : Uses the improved datagram code.
22 * Alan Cox : Added NULL's for socket options.
23 * Alan Cox : Re-commented the code.
24 * Alan Cox : Use new kernel side addressing
25 * Rob Janssen : Correct MTU usage.
26 * Dave Platt : Counter leaks caused by incorrect
27 * interrupt locking and some slightly
28 * dubious gcc output. Can you read
29 * compiler: it said _VOLATILE_
30 * Richard Kooijman : Timestamp fixes.
31 * Alan Cox : New buffers. Use sk->mac.raw.
32 * Alan Cox : sendmsg/recvmsg support.
33 * Alan Cox : Protocol setting support
34 * Alexey Kuznetsov : Untied from IPv4 stack.
35 * Cyrus Durgin : Fixed kerneld for kmod.
36 * Michal Ostrowski : Module initialization cleanup.
37 * Ulises Alonso : Frame number limit removal and
38 * packet_set_ring memory leak.
39 * Eric Biederman : Allow for > 8 byte hardware addresses.
40 * The convention is that longer addresses
41 * will simply extend the hardware address
42 * byte arrays at the end of sockaddr_ll
45 * This program is free software; you can redistribute it and/or
46 * modify it under the terms of the GNU General Public License
47 * as published by the Free Software Foundation; either version
48 * 2 of the License, or (at your option) any later version.
52 #include <linux/types.h>
53 #include <linux/sched.h>
55 #include <linux/capability.h>
56 #include <linux/fcntl.h>
57 #include <linux/socket.h>
59 #include <linux/inet.h>
60 #include <linux/netdevice.h>
61 #include <linux/if_packet.h>
62 #include <linux/wireless.h>
63 #include <linux/kernel.h>
64 #include <linux/kmod.h>
66 #include <net/protocol.h>
67 #include <linux/skbuff.h>
69 #include <linux/errno.h>
70 #include <linux/timer.h>
71 #include <asm/system.h>
72 #include <asm/uaccess.h>
73 #include <asm/ioctls.h>
75 #include <asm/cacheflush.h>
77 #include <linux/proc_fs.h>
78 #include <linux/seq_file.h>
79 #include <linux/poll.h>
80 #include <linux/module.h>
81 #include <linux/init.h>
84 #include <net/inet_common.h>
87 #define CONFIG_SOCK_PACKET 1
90 Proposed replacement for SIOC{ADD,DEL}MULTI and
91 IFF_PROMISC, IFF_ALLMULTI flags.
93 It is more expensive, but I believe
94 it is the really correct solution: reentrant, safe and fault tolerant.
96 IFF_PROMISC/IFF_ALLMULTI/SIOC{ADD/DEL}MULTI are faked by keeping
97 a reference count and a global flag, so that the real status is
98 (gflag|(count != 0)); this lets us keep the obsolete, faulty interface
99 without harming clever users.
101 #define CONFIG_PACKET_MULTICAST 1
105 - if the device has no dev->hard_header routine, it adds and removes the ll header
106 inside itself. In this case the ll header is invisible outside of the device,
107 but higher levels should still reserve dev->hard_header_len.
108 Some devices are clever enough to reallocate the skb when the header
109 does not fit into the reserved space (tunnels), others are silly
111 - a packet socket receives packets with the ll header already pulled,
112 so SOCK_RAW has to push it back.
117 Incoming, dev->hard_header!=NULL
121 Outgoing, dev->hard_header!=NULL
125 Incoming, dev->hard_header==NULL
126 mac.raw -> UNKNOWN position. It is very likely that it points to the ll header.
127 PPP does this, which is wrong, because it introduces asymmetry
128 between the rx and tx paths.
131 Outgoing, dev->hard_header==NULL
132 mac.raw -> data. The ll header is still not built!
136 If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
142 dev->hard_header != NULL
146 dev->hard_header == NULL (the ll header is added by the device, we cannot control it)
150 We should set nh.raw on output to the correct position;
151 the packet classifier depends on it.
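/*
 * The user-visible consequence of the notes above: on a SOCK_RAW packet
 * socket the data seen by userspace starts with the link-level header,
 * while on SOCK_DGRAM it starts with the payload and the link-level source
 * address is reported through sockaddr_ll instead.  A minimal userspace
 * sketch, not part of this file ("eth0" setup and error handling assumed):
 *
 *	int fd = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	unsigned char buf[2048];
 *	struct sockaddr_ll from;
 *	socklen_t fromlen = sizeof(from);
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&from, &fromlen);
 *
 * buf[0..n) is the payload; from.sll_addr/from.sll_halen carry the
 * link-level source address and from.sll_protocol the frame protocol.
 */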
154 /* List of all packet sockets. */
155 static HLIST_HEAD(packet_sklist);
156 static DEFINE_RWLOCK(packet_sklist_lock);
158 static atomic_t packet_socks_nr;
161 /* Private packet socket structures. */
163 #ifdef CONFIG_PACKET_MULTICAST
166 struct packet_mclist *next;
171 unsigned char addr[MAX_ADDR_LEN];
173 /* identical to struct packet_mreq except it has
174 * a longer address field.
176 struct packet_mreq_max
179 unsigned short mr_type;
180 unsigned short mr_alen;
181 unsigned char mr_address[MAX_ADDR_LEN];
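/*
 * A hedged userspace sketch of how the shorter struct packet_mreq is used
 * with PACKET_ADD_MEMBERSHIP (fd is an assumed, already-open AF_PACKET
 * socket and "eth0" an assumed interface name):
 *
 *	struct packet_mreq mr;
 *
 *	memset(&mr, 0, sizeof(mr));
 *	mr.mr_ifindex = if_nametoindex("eth0");
 *	mr.mr_type    = PACKET_MR_PROMISC;	// or PACKET_MR_MULTICAST with
 *						// mr_alen/mr_address filled in
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mr, sizeof(mr));
 *
 * PACKET_DROP_MEMBERSHIP with the same arguments undoes it.
 */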
184 #ifdef CONFIG_PACKET_MMAP
185 static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing);
188 static void packet_flush_mclist(struct sock *sk);
191 /* struct sock has to be the first member of packet_sock */
193 struct tpacket_stats stats;
194 #ifdef CONFIG_PACKET_MMAP
197 unsigned int frames_per_block;
198 unsigned int frame_size;
199 unsigned int frame_max;
202 struct packet_type prot_hook;
203 spinlock_t bind_lock;
204 unsigned int running:1, /* prot_hook is attached*/
206 int ifindex; /* bound device */
208 #ifdef CONFIG_PACKET_MULTICAST
209 struct packet_mclist *mclist;
211 #ifdef CONFIG_PACKET_MMAP
213 unsigned int pg_vec_order;
214 unsigned int pg_vec_pages;
215 unsigned int pg_vec_len;
219 struct packet_skb_cb {
220 unsigned int origlen;
222 struct sockaddr_pkt pkt;
223 struct sockaddr_ll ll;
227 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
229 #ifdef CONFIG_PACKET_MMAP
231 static inline char *packet_lookup_frame(struct packet_sock *po, unsigned int position)
233 unsigned int pg_vec_pos, frame_offset;
236 pg_vec_pos = position / po->frames_per_block;
237 frame_offset = position % po->frames_per_block;
239 frame = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size);
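	/* Example: with frames_per_block == 4 and frame_size == 2048,
	 * position 10 maps to pg_vec[10 / 4] == pg_vec[2] at byte offset
	 * (10 % 4) * 2048 == 4096 within that block. */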
245 static inline struct packet_sock *pkt_sk(struct sock *sk)
247 return (struct packet_sock *)sk;
250 static void packet_sock_destruct(struct sock *sk)
252 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
253 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
255 if (!sock_flag(sk, SOCK_DEAD)) {
256 printk("Attempt to release alive packet socket: %p\n", sk);
260 atomic_dec(&packet_socks_nr);
261 #ifdef PACKET_REFCNT_DEBUG
262 printk(KERN_DEBUG "PACKET socket %p is free, %d are alive\n", sk, atomic_read(&packet_socks_nr));
268 struct proto_ops packet_ops;
270 #ifdef CONFIG_SOCK_PACKET
271 static const struct proto_ops packet_ops_spkt;
273 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
276 struct sockaddr_pkt *spkt;
279 * When we registered the protocol we saved the socket in the data
280 * field for just this event.
283 sk = pt->af_packet_priv;
286 * Yank back the headers [hope the device set this
287 * right or kerboom...]
289 * Incoming packets have ll header pulled,
292 * For outgoing ones skb->data == skb->mac.raw,
293 * so this procedure is a no-op.
296 if (skb->pkt_type == PACKET_LOOPBACK)
299 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
302 /* drop any routing info */
303 dst_release(skb->dst);
306 /* drop conntrack reference */
309 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
311 skb_push(skb, skb->data-skb->mac.raw);
314 * The SOCK_PACKET socket receives _all_ frames.
317 spkt->spkt_family = dev->type;
318 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
319 spkt->spkt_protocol = skb->protocol;
322 * Charge the memory to the socket. This is done specifically
323 * to prevent sockets from using up all the memory.
326 if (sock_queue_rcv_skb(sk,skb) == 0)
337 * Output a raw packet to a device layer. This bypasses all the other
338 * protocol layers and you must therefore supply it with a complete frame
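/*
 * A minimal userspace sketch of driving this path (legacy SOCK_PACKET
 * sockets; "eth0", the frame buffer and frame_len are assumptions, and
 * error handling is omitted):
 *
 *	int fd = socket(PF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	struct sockaddr_pkt spkt;
 *
 *	memset(&spkt, 0, sizeof(spkt));
 *	spkt.spkt_family   = AF_PACKET;
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 *
 * The frame must be complete, including the link-level header, and no
 * larger than dev->mtu + dev->hard_header_len (checked below).
 */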
341 static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
342 struct msghdr *msg, size_t len)
344 struct sock *sk = sock->sk;
345 struct sockaddr_pkt *saddr=(struct sockaddr_pkt *)msg->msg_name;
347 struct net_device *dev;
352 * Get and verify the address.
357 if (msg->msg_namelen < sizeof(struct sockaddr))
359 if (msg->msg_namelen==sizeof(struct sockaddr_pkt))
360 proto=saddr->spkt_protocol;
363 return(-ENOTCONN); /* SOCK_PACKET must be sent giving an address */
366 * Find the device first to size check it
369 saddr->spkt_device[13] = 0;
370 dev = dev_get_by_name(saddr->spkt_device);
376 if (!(dev->flags & IFF_UP))
380 * You may not queue a frame bigger than the mtu. This is the lowest level
381 * raw protocol and you must do your own fragmentation at this level.
385 if (len > dev->mtu + dev->hard_header_len)
389 skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);
392 * If the write buffer is full, then tough. At this level the user gets to
393 * deal with the problem - do your own algorithmic backoffs. That's far
404 /* FIXME: Save some space for broken drivers that write a
405 * hard header at transmission time by themselves. PPP is the
406 * notable one here. This should really be fixed at the driver level.
408 skb_reserve(skb, LL_RESERVED_SPACE(dev));
409 skb->nh.raw = skb->data;
411 /* Try to align data part correctly */
412 if (dev->hard_header) {
413 skb->data -= dev->hard_header_len;
414 skb->tail -= dev->hard_header_len;
415 if (len < dev->hard_header_len)
416 skb->nh.raw = skb->data;
419 /* Returns -EFAULT on error */
420 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
421 skb->protocol = proto;
423 skb->priority = sk->sk_priority;
444 static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
447 struct sk_filter *filter;
450 filter = rcu_dereference(sk->sk_filter);
452 res = sk_run_filter(skb, filter->insns, filter->len);
453 rcu_read_unlock_bh();
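/*
 * The filter consulted here is a classic BPF program attached from
 * userspace; its return value is the number of bytes to keep, and 0 drops
 * the packet.  A hedged sketch of attaching one (fd is an assumed
 * AF_PACKET socket; the program unconditionally keeps the first 96 bytes):
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 96),
 *	};
 *	struct sock_fprog prog = {
 *		.len    = sizeof(code) / sizeof(code[0]),
 *		.filter = code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */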
459 This function does lazy skb cloning in the hope that most packets
460 are discarded by BPF.
462 Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
463 and skb->cb are mangled. It works because (and as long as) packets
464 falling here are owned by the current CPU. Output packets are cloned
465 by dev_queue_xmit_nit(), input packets are processed by net_bh
466 sequentially, so if we restore the skb to its original state on exit,
467 we will not harm anyone.
470 static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
473 struct sockaddr_ll *sll;
474 struct packet_sock *po;
475 u8 * skb_head = skb->data;
476 int skb_len = skb->len;
477 unsigned int snaplen, res;
479 if (skb->pkt_type == PACKET_LOOPBACK)
482 sk = pt->af_packet_priv;
487 if (dev->hard_header) {
488 /* The device has an explicit notion of ll header,
489 exported to higher levels.
491 Otherwise, the device hides the details of its frame
492 structure, so that the corresponding packet head is
493 never delivered to the user.
495 if (sk->sk_type != SOCK_DGRAM)
496 skb_push(skb, skb->data - skb->mac.raw);
497 else if (skb->pkt_type == PACKET_OUTGOING) {
498 /* Special case: outgoing packets have ll header at head */
499 skb_pull(skb, skb->nh.raw - skb->data);
505 res = run_filter(skb, sk, snaplen);
511 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
512 (unsigned)sk->sk_rcvbuf)
515 if (skb_shared(skb)) {
516 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
520 if (skb_head != skb->data) {
521 skb->data = skb_head;
528 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
531 sll = &PACKET_SKB_CB(skb)->sa.ll;
532 sll->sll_family = AF_PACKET;
533 sll->sll_hatype = dev->type;
534 sll->sll_protocol = skb->protocol;
535 sll->sll_pkttype = skb->pkt_type;
536 sll->sll_ifindex = dev->ifindex;
539 if (dev->hard_header_parse)
540 sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
542 PACKET_SKB_CB(skb)->origlen = skb->len;
544 if (pskb_trim(skb, snaplen))
547 skb_set_owner_r(skb, sk);
549 dst_release(skb->dst);
552 /* drop conntrack reference */
555 spin_lock(&sk->sk_receive_queue.lock);
556 po->stats.tp_packets++;
557 __skb_queue_tail(&sk->sk_receive_queue, skb);
558 spin_unlock(&sk->sk_receive_queue.lock);
559 sk->sk_data_ready(sk, skb->len);
563 spin_lock(&sk->sk_receive_queue.lock);
564 po->stats.tp_drops++;
565 spin_unlock(&sk->sk_receive_queue.lock);
568 if (skb_head != skb->data && skb_shared(skb)) {
569 skb->data = skb_head;
577 #ifdef CONFIG_PACKET_MMAP
578 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
581 struct packet_sock *po;
582 struct sockaddr_ll *sll;
583 struct tpacket_hdr *h;
584 u8 * skb_head = skb->data;
585 int skb_len = skb->len;
586 unsigned int snaplen, res;
587 unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
588 unsigned short macoff, netoff;
589 struct sk_buff *copy_skb = NULL;
591 if (skb->pkt_type == PACKET_LOOPBACK)
594 sk = pt->af_packet_priv;
597 if (dev->hard_header) {
598 if (sk->sk_type != SOCK_DGRAM)
599 skb_push(skb, skb->data - skb->mac.raw);
600 else if (skb->pkt_type == PACKET_OUTGOING) {
601 /* Special case: outgoing packets have ll header at head */
602 skb_pull(skb, skb->nh.raw - skb->data);
606 if (skb->ip_summed == CHECKSUM_PARTIAL)
607 status |= TP_STATUS_CSUMNOTREADY;
611 res = run_filter(skb, sk, snaplen);
617 if (sk->sk_type == SOCK_DGRAM) {
618 macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
620 unsigned maclen = skb->nh.raw - skb->data;
621 netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen));
622 macoff = netoff - maclen;
625 if (macoff + snaplen > po->frame_size) {
626 if (po->copy_thresh &&
627 atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
628 (unsigned)sk->sk_rcvbuf) {
629 if (skb_shared(skb)) {
630 copy_skb = skb_clone(skb, GFP_ATOMIC);
632 copy_skb = skb_get(skb);
633 skb_head = skb->data;
636 skb_set_owner_r(copy_skb, sk);
638 snaplen = po->frame_size - macoff;
639 if ((int)snaplen < 0)
643 spin_lock(&sk->sk_receive_queue.lock);
644 h = (struct tpacket_hdr *)packet_lookup_frame(po, po->head);
648 po->head = po->head != po->frame_max ? po->head+1 : 0;
649 po->stats.tp_packets++;
651 status |= TP_STATUS_COPY;
652 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
654 if (!po->stats.tp_drops)
655 status &= ~TP_STATUS_LOSING;
656 spin_unlock(&sk->sk_receive_queue.lock);
658 skb_copy_bits(skb, 0, (u8*)h + macoff, snaplen);
660 h->tp_len = skb->len;
661 h->tp_snaplen = snaplen;
664 if (skb->tstamp.off_sec == 0) {
665 __net_timestamp(skb);
666 sock_enable_timestamp(sk);
668 h->tp_sec = skb->tstamp.off_sec;
669 h->tp_usec = skb->tstamp.off_usec;
671 sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h)));
673 if (dev->hard_header_parse)
674 sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
675 sll->sll_family = AF_PACKET;
676 sll->sll_hatype = dev->type;
677 sll->sll_protocol = skb->protocol;
678 sll->sll_pkttype = skb->pkt_type;
679 sll->sll_ifindex = dev->ifindex;
681 h->tp_status = status;
685 struct page *p_start, *p_end;
686 u8 *h_end = (u8 *)h + macoff + snaplen - 1;
688 p_start = virt_to_page(h);
689 p_end = virt_to_page(h_end);
690 while (p_start <= p_end) {
691 flush_dcache_page(p_start);
696 sk->sk_data_ready(sk, 0);
699 if (skb_head != skb->data && skb_shared(skb)) {
700 skb->data = skb_head;
708 po->stats.tp_drops++;
709 spin_unlock(&sk->sk_receive_queue.lock);
711 sk->sk_data_ready(sk, 0);
720 static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
721 struct msghdr *msg, size_t len)
723 struct sock *sk = sock->sk;
724 struct sockaddr_ll *saddr=(struct sockaddr_ll *)msg->msg_name;
726 struct net_device *dev;
729 int ifindex, err, reserve = 0;
732 * Get and verify the address.
736 struct packet_sock *po = pkt_sk(sk);
738 ifindex = po->ifindex;
743 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
745 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
747 ifindex = saddr->sll_ifindex;
748 proto = saddr->sll_protocol;
749 addr = saddr->sll_addr;
753 dev = dev_get_by_index(ifindex);
757 if (sock->type == SOCK_RAW)
758 reserve = dev->hard_header_len;
761 if (!(dev->flags & IFF_UP))
765 if (len > dev->mtu+reserve)
768 skb = sock_alloc_send_skb(sk, len + LL_RESERVED_SPACE(dev),
769 msg->msg_flags & MSG_DONTWAIT, &err);
773 skb_reserve(skb, LL_RESERVED_SPACE(dev));
774 skb->nh.raw = skb->data;
776 if (dev->hard_header) {
779 res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len);
780 if (sock->type != SOCK_DGRAM) {
781 skb->tail = skb->data;
787 /* Returns -EFAULT on error */
788 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
792 skb->protocol = proto;
794 skb->priority = sk->sk_priority;
800 err = dev_queue_xmit(skb);
801 if (err > 0 && (err = net_xmit_errno(err)) != 0)
818 * Close a PACKET socket. This is fairly simple. We immediately go
819 * to 'closed' state and remove our protocol entry in the device list.
822 static int packet_release(struct socket *sock)
824 struct sock *sk = sock->sk;
825 struct packet_sock *po;
832 write_lock_bh(&packet_sklist_lock);
833 sk_del_node_init(sk);
834 write_unlock_bh(&packet_sklist_lock);
837 * Unhook packet receive handler.
842 * Remove the protocol hook
844 dev_remove_pack(&po->prot_hook);
850 #ifdef CONFIG_PACKET_MULTICAST
851 packet_flush_mclist(sk);
854 #ifdef CONFIG_PACKET_MMAP
856 struct tpacket_req req;
857 memset(&req, 0, sizeof(req));
858 packet_set_ring(sk, &req, 1);
863 * Now the socket is dead. No more input will appear.
871 skb_queue_purge(&sk->sk_receive_queue);
878 * Attach a packet hook.
881 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
883 struct packet_sock *po = pkt_sk(sk);
885 * Detach an existing hook if present.
890 spin_lock(&po->bind_lock);
895 spin_unlock(&po->bind_lock);
896 dev_remove_pack(&po->prot_hook);
897 spin_lock(&po->bind_lock);
901 po->prot_hook.type = protocol;
902 po->prot_hook.dev = dev;
904 po->ifindex = dev ? dev->ifindex : 0;
910 if (dev->flags&IFF_UP) {
911 dev_add_pack(&po->prot_hook);
915 sk->sk_err = ENETDOWN;
916 if (!sock_flag(sk, SOCK_DEAD))
917 sk->sk_error_report(sk);
920 dev_add_pack(&po->prot_hook);
926 spin_unlock(&po->bind_lock);
932 * Bind a packet socket to a device
935 #ifdef CONFIG_SOCK_PACKET
937 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len)
939 struct sock *sk=sock->sk;
941 struct net_device *dev;
948 if (addr_len != sizeof(struct sockaddr))
950 strlcpy(name,uaddr->sa_data,sizeof(name));
952 dev = dev_get_by_name(name);
954 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
961 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
963 struct sockaddr_ll *sll = (struct sockaddr_ll*)uaddr;
964 struct sock *sk=sock->sk;
965 struct net_device *dev = NULL;
973 if (addr_len < sizeof(struct sockaddr_ll))
975 if (sll->sll_family != AF_PACKET)
978 if (sll->sll_ifindex) {
980 dev = dev_get_by_index(sll->sll_ifindex);
984 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
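/*
 * A hedged userspace sketch of the bind() call handled here ("eth0" is an
 * assumed interface name; fd an AF_PACKET, SOCK_RAW socket):
 *
 *	struct sockaddr_ll sll;
 *
 *	memset(&sll, 0, sizeof(sll));
 *	sll.sll_family   = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_ALL);
 *	sll.sll_ifindex  = if_nametoindex("eth0");
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *
 * A zero sll_ifindex binds to all devices; a zero sll_protocol keeps the
 * protocol the socket was created with.
 */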
992 static struct proto packet_proto = {
994 .owner = THIS_MODULE,
995 .obj_size = sizeof(struct packet_sock),
999 * Create a packet of type SOCK_PACKET.
1002 static int packet_create(struct socket *sock, int protocol)
1005 struct packet_sock *po;
1006 __be16 proto = (__force __be16)protocol; /* weird, but documented */
1009 if (!capable(CAP_NET_RAW))
1011 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW
1012 #ifdef CONFIG_SOCK_PACKET
1013 && sock->type != SOCK_PACKET
1016 return -ESOCKTNOSUPPORT;
1018 sock->state = SS_UNCONNECTED;
1021 sk = sk_alloc(PF_PACKET, GFP_KERNEL, &packet_proto, 1);
1025 sock->ops = &packet_ops;
1026 #ifdef CONFIG_SOCK_PACKET
1027 if (sock->type == SOCK_PACKET)
1028 sock->ops = &packet_ops_spkt;
1030 sock_init_data(sock, sk);
1033 sk->sk_family = PF_PACKET;
1036 sk->sk_destruct = packet_sock_destruct;
1037 atomic_inc(&packet_socks_nr);
1040 * Attach a protocol block
1043 spin_lock_init(&po->bind_lock);
1044 po->prot_hook.func = packet_rcv;
1045 #ifdef CONFIG_SOCK_PACKET
1046 if (sock->type == SOCK_PACKET)
1047 po->prot_hook.func = packet_rcv_spkt;
1049 po->prot_hook.af_packet_priv = sk;
1052 po->prot_hook.type = proto;
1053 dev_add_pack(&po->prot_hook);
1058 write_lock_bh(&packet_sklist_lock);
1059 sk_add_node(sk, &packet_sklist);
1060 write_unlock_bh(&packet_sklist_lock);
1067 * Pull a packet from our receive queue and hand it to the user.
1068 * If necessary we block.
1071 static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1072 struct msghdr *msg, size_t len, int flags)
1074 struct sock *sk = sock->sk;
1075 struct sk_buff *skb;
1077 struct sockaddr_ll *sll;
1080 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
1084 /* What error should we return now? EUNATTACH? */
1085 if (pkt_sk(sk)->ifindex < 0)
1090 * Call the generic datagram receiver. This handles all sorts
1091 * of horrible races and re-entrancy so we can forget about it
1092 * in the protocol layers.
1094 * Now it will return ENETDOWN if the device has just gone down,
1095 * but then it will block.
1098 skb=skb_recv_datagram(sk,flags,flags&MSG_DONTWAIT,&err);
1101 * An error occurred, so return it. Because skb_recv_datagram()
1102 * handles the blocking, we don't need to see or worry about blocking
1110 * If the address length field is there to be filled in, we fill
1114 sll = &PACKET_SKB_CB(skb)->sa.ll;
1115 if (sock->type == SOCK_PACKET)
1116 msg->msg_namelen = sizeof(struct sockaddr_pkt);
1118 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
1121 * You lose any data beyond the buffer you gave. If it worries a
1122 * user program, it can ask the device for its MTU anyway.
1129 msg->msg_flags|=MSG_TRUNC;
1132 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1136 sock_recv_timestamp(msg, sk, skb);
1139 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
1142 if (pkt_sk(sk)->auxdata) {
1143 struct tpacket_auxdata aux;
1145 aux.tp_status = TP_STATUS_USER;
1146 if (skb->ip_summed == CHECKSUM_PARTIAL)
1147 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
1148 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
1149 aux.tp_snaplen = skb->len;
1151 aux.tp_net = skb->nh.raw - skb->data;
1153 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
1157 * Free or return the buffer as appropriate. Again this
1158 * hides all the races and re-entrancy issues from us.
1160 err = (flags&MSG_TRUNC) ? skb->len : copied;
1163 skb_free_datagram(sk, skb);
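/*
 * When PACKET_AUXDATA is enabled (see packet_setsockopt below), the
 * metadata queued above arrives as a control message.  A hedged userspace
 * sketch of reading it (fd, buf and cbuf are assumed to be set up by the
 * caller; use() stands in for whatever the application does with it):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov        = &iov,
 *		.msg_iovlen     = 1,
 *		.msg_control    = cbuf,
 *		.msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	recvmsg(fd, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
 *			use(aux->tp_len, aux->tp_snaplen, aux->tp_status);
 *		}
 *	}
 *
 * tp_len is the length on the wire, tp_snaplen what was actually copied,
 * and tp_status may carry TP_STATUS_CSUMNOTREADY for outgoing packets.
 */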
1168 #ifdef CONFIG_SOCK_PACKET
1169 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
1170 int *uaddr_len, int peer)
1172 struct net_device *dev;
1173 struct sock *sk = sock->sk;
1178 uaddr->sa_family = AF_PACKET;
1179 dev = dev_get_by_index(pkt_sk(sk)->ifindex);
1181 strlcpy(uaddr->sa_data, dev->name, 15);
1184 memset(uaddr->sa_data, 0, 14);
1185 *uaddr_len = sizeof(*uaddr);
1191 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
1192 int *uaddr_len, int peer)
1194 struct net_device *dev;
1195 struct sock *sk = sock->sk;
1196 struct packet_sock *po = pkt_sk(sk);
1197 struct sockaddr_ll *sll = (struct sockaddr_ll*)uaddr;
1202 sll->sll_family = AF_PACKET;
1203 sll->sll_ifindex = po->ifindex;
1204 sll->sll_protocol = po->num;
1205 dev = dev_get_by_index(po->ifindex);
1207 sll->sll_hatype = dev->type;
1208 sll->sll_halen = dev->addr_len;
1209 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
1212 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
1215 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
1220 #ifdef CONFIG_PACKET_MULTICAST
1221 static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what)
1224 case PACKET_MR_MULTICAST:
1226 dev_mc_add(dev, i->addr, i->alen, 0);
1228 dev_mc_delete(dev, i->addr, i->alen, 0);
1230 case PACKET_MR_PROMISC:
1231 dev_set_promiscuity(dev, what);
1233 case PACKET_MR_ALLMULTI:
1234 dev_set_allmulti(dev, what);
1240 static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
1242 for ( ; i; i=i->next) {
1243 if (i->ifindex == dev->ifindex)
1244 packet_dev_mc(dev, i, what);
1248 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
1250 struct packet_sock *po = pkt_sk(sk);
1251 struct packet_mclist *ml, *i;
1252 struct net_device *dev;
1258 dev = __dev_get_by_index(mreq->mr_ifindex);
1263 if (mreq->mr_alen > dev->addr_len)
1267 i = kmalloc(sizeof(*i), GFP_KERNEL);
1272 for (ml = po->mclist; ml; ml = ml->next) {
1273 if (ml->ifindex == mreq->mr_ifindex &&
1274 ml->type == mreq->mr_type &&
1275 ml->alen == mreq->mr_alen &&
1276 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
1278 /* Free the new element ... */
1284 i->type = mreq->mr_type;
1285 i->ifindex = mreq->mr_ifindex;
1286 i->alen = mreq->mr_alen;
1287 memcpy(i->addr, mreq->mr_address, i->alen);
1289 i->next = po->mclist;
1291 packet_dev_mc(dev, i, +1);
1298 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
1300 struct packet_mclist *ml, **mlp;
1304 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
1305 if (ml->ifindex == mreq->mr_ifindex &&
1306 ml->type == mreq->mr_type &&
1307 ml->alen == mreq->mr_alen &&
1308 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
1309 if (--ml->count == 0) {
1310 struct net_device *dev;
1312 dev = dev_get_by_index(ml->ifindex);
1314 packet_dev_mc(dev, ml, -1);
1324 return -EADDRNOTAVAIL;
1327 static void packet_flush_mclist(struct sock *sk)
1329 struct packet_sock *po = pkt_sk(sk);
1330 struct packet_mclist *ml;
1336 while ((ml = po->mclist) != NULL) {
1337 struct net_device *dev;
1339 po->mclist = ml->next;
1340 if ((dev = dev_get_by_index(ml->ifindex)) != NULL) {
1341 packet_dev_mc(dev, ml, -1);
1351 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1353 struct sock *sk = sock->sk;
1354 struct packet_sock *po = pkt_sk(sk);
1357 if (level != SOL_PACKET)
1358 return -ENOPROTOOPT;
1361 #ifdef CONFIG_PACKET_MULTICAST
1362 case PACKET_ADD_MEMBERSHIP:
1363 case PACKET_DROP_MEMBERSHIP:
1365 struct packet_mreq_max mreq;
1367 memset(&mreq, 0, sizeof(mreq));
1368 if (len < sizeof(struct packet_mreq))
1370 if (len > sizeof(mreq))
1372 if (copy_from_user(&mreq,optval,len))
1374 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
1376 if (optname == PACKET_ADD_MEMBERSHIP)
1377 ret = packet_mc_add(sk, &mreq);
1379 ret = packet_mc_drop(sk, &mreq);
1383 #ifdef CONFIG_PACKET_MMAP
1384 case PACKET_RX_RING:
1386 struct tpacket_req req;
1388 if (optlen<sizeof(req))
1390 if (copy_from_user(&req,optval,sizeof(req)))
1392 return packet_set_ring(sk, &req, 0);
1394 case PACKET_COPY_THRESH:
1398 if (optlen!=sizeof(val))
1400 if (copy_from_user(&val,optval,sizeof(val)))
1403 pkt_sk(sk)->copy_thresh = val;
1407 case PACKET_AUXDATA:
1411 if (optlen < sizeof(val))
1413 if (copy_from_user(&val, optval, sizeof(val)))
1416 po->auxdata = !!val;
1420 return -ENOPROTOOPT;
1424 static int packet_getsockopt(struct socket *sock, int level, int optname,
1425 char __user *optval, int __user *optlen)
1429 struct sock *sk = sock->sk;
1430 struct packet_sock *po = pkt_sk(sk);
1432 struct tpacket_stats st;
1434 if (level != SOL_PACKET)
1435 return -ENOPROTOOPT;
1437 if (get_user(len, optlen))
1444 case PACKET_STATISTICS:
1445 if (len > sizeof(struct tpacket_stats))
1446 len = sizeof(struct tpacket_stats);
1447 spin_lock_bh(&sk->sk_receive_queue.lock);
1449 memset(&po->stats, 0, sizeof(st));
1450 spin_unlock_bh(&sk->sk_receive_queue.lock);
1451 st.tp_packets += st.tp_drops;
1455 case PACKET_AUXDATA:
1456 if (len > sizeof(int))
1463 return -ENOPROTOOPT;
1466 if (put_user(len, optlen))
1468 if (copy_to_user(optval, data, len))
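/*
 * A hedged userspace sketch of the PACKET_STATISTICS call handled above
 * (fd is an assumed AF_PACKET socket):
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 *
 * st.tp_packets counts everything that reached the socket, drops included
 * (tp_drops is added in above), and both counters are reset by the read.
 */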
1474 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
1477 struct hlist_node *node;
1478 struct net_device *dev = (struct net_device*)data;
1480 read_lock(&packet_sklist_lock);
1481 sk_for_each(sk, node, &packet_sklist) {
1482 struct packet_sock *po = pkt_sk(sk);
1485 case NETDEV_UNREGISTER:
1486 #ifdef CONFIG_PACKET_MULTICAST
1488 packet_dev_mclist(dev, po->mclist, -1);
1492 if (dev->ifindex == po->ifindex) {
1493 spin_lock(&po->bind_lock);
1495 __dev_remove_pack(&po->prot_hook);
1498 sk->sk_err = ENETDOWN;
1499 if (!sock_flag(sk, SOCK_DEAD))
1500 sk->sk_error_report(sk);
1502 if (msg == NETDEV_UNREGISTER) {
1504 po->prot_hook.dev = NULL;
1506 spin_unlock(&po->bind_lock);
1510 spin_lock(&po->bind_lock);
1511 if (dev->ifindex == po->ifindex && po->num &&
1513 dev_add_pack(&po->prot_hook);
1517 spin_unlock(&po->bind_lock);
1521 read_unlock(&packet_sklist_lock);
1526 static int packet_ioctl(struct socket *sock, unsigned int cmd,
1529 struct sock *sk = sock->sk;
1534 int amount = atomic_read(&sk->sk_wmem_alloc);
1535 return put_user(amount, (int __user *)arg);
1539 struct sk_buff *skb;
1542 spin_lock_bh(&sk->sk_receive_queue.lock);
1543 skb = skb_peek(&sk->sk_receive_queue);
1546 spin_unlock_bh(&sk->sk_receive_queue.lock);
1547 return put_user(amount, (int __user *)arg);
1550 return sock_get_timestamp(sk, (struct timeval __user *)arg);
1560 case SIOCGIFBRDADDR:
1561 case SIOCSIFBRDADDR:
1562 case SIOCGIFNETMASK:
1563 case SIOCSIFNETMASK:
1564 case SIOCGIFDSTADDR:
1565 case SIOCSIFDSTADDR:
1567 return inet_dgram_ops.ioctl(sock, cmd, arg);
1571 return -ENOIOCTLCMD;
1576 #ifndef CONFIG_PACKET_MMAP
1577 #define packet_mmap sock_no_mmap
1578 #define packet_poll datagram_poll
1581 static unsigned int packet_poll(struct file * file, struct socket *sock,
1584 struct sock *sk = sock->sk;
1585 struct packet_sock *po = pkt_sk(sk);
1586 unsigned int mask = datagram_poll(file, sock, wait);
1588 spin_lock_bh(&sk->sk_receive_queue.lock);
1590 unsigned last = po->head ? po->head-1 : po->frame_max;
1591 struct tpacket_hdr *h;
1593 h = (struct tpacket_hdr *)packet_lookup_frame(po, last);
1596 mask |= POLLIN | POLLRDNORM;
1598 spin_unlock_bh(&sk->sk_receive_queue.lock);
1603 /* Dirty? Well, I still have not learned a better way to account
1607 static void packet_mm_open(struct vm_area_struct *vma)
1609 struct file *file = vma->vm_file;
1610 struct socket * sock = file->private_data;
1611 struct sock *sk = sock->sk;
1614 atomic_inc(&pkt_sk(sk)->mapped);
1617 static void packet_mm_close(struct vm_area_struct *vma)
1619 struct file *file = vma->vm_file;
1620 struct socket * sock = file->private_data;
1621 struct sock *sk = sock->sk;
1624 atomic_dec(&pkt_sk(sk)->mapped);
1627 static struct vm_operations_struct packet_mmap_ops = {
1628 .open = packet_mm_open,
1629 .close = packet_mm_close,
1632 static inline struct page *pg_vec_endpage(char *one_pg_vec, unsigned int order)
1634 return virt_to_page(one_pg_vec + (PAGE_SIZE << order) - 1);
1637 static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
1641 for (i = 0; i < len; i++) {
1642 if (likely(pg_vec[i]))
1643 free_pages((unsigned long) pg_vec[i], order);
1648 static inline char *alloc_one_pg_vec_page(unsigned long order)
1650 return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
1654 static char **alloc_pg_vec(struct tpacket_req *req, int order)
1656 unsigned int block_nr = req->tp_block_nr;
1660 pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
1661 if (unlikely(!pg_vec))
1664 for (i = 0; i < block_nr; i++) {
1665 pg_vec[i] = alloc_one_pg_vec_page(order);
1666 if (unlikely(!pg_vec[i]))
1667 goto out_free_pgvec;
1674 free_pg_vec(pg_vec, order, block_nr);
1679 static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
1681 char **pg_vec = NULL;
1682 struct packet_sock *po = pkt_sk(sk);
1683 int was_running, order = 0;
1687 if (req->tp_block_nr) {
1690 /* Sanity tests and some calculations */
1692 if (unlikely(po->pg_vec))
1695 if (unlikely((int)req->tp_block_size <= 0))
1697 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
1699 if (unlikely(req->tp_frame_size < TPACKET_HDRLEN))
1701 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
1704 po->frames_per_block = req->tp_block_size/req->tp_frame_size;
1705 if (unlikely(po->frames_per_block <= 0))
1707 if (unlikely((po->frames_per_block * req->tp_block_nr) !=
1712 order = get_order(req->tp_block_size);
1713 pg_vec = alloc_pg_vec(req, order);
1714 if (unlikely(!pg_vec))
1718 for (i = 0; i < req->tp_block_nr; i++) {
1719 char *ptr = pg_vec[i];
1720 struct tpacket_hdr *header;
1723 for (k = 0; k < po->frames_per_block; k++) {
1724 header = (struct tpacket_hdr *) ptr;
1725 header->tp_status = TP_STATUS_KERNEL;
1726 ptr += req->tp_frame_size;
1731 if (unlikely(req->tp_frame_nr))
1737 /* Detach socket from network */
1738 spin_lock(&po->bind_lock);
1739 was_running = po->running;
1742 __dev_remove_pack(&po->prot_hook);
1747 spin_unlock(&po->bind_lock);
1752 if (closing || atomic_read(&po->mapped) == 0) {
1754 #define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
1756 spin_lock_bh(&sk->sk_receive_queue.lock);
1757 pg_vec = XC(po->pg_vec, pg_vec);
1758 po->frame_max = (req->tp_frame_nr - 1);
1760 po->frame_size = req->tp_frame_size;
1761 spin_unlock_bh(&sk->sk_receive_queue.lock);
1763 order = XC(po->pg_vec_order, order);
1764 req->tp_block_nr = XC(po->pg_vec_len, req->tp_block_nr);
1766 po->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
1767 po->prot_hook.func = po->pg_vec ? tpacket_rcv : packet_rcv;
1768 skb_queue_purge(&sk->sk_receive_queue);
1770 if (atomic_read(&po->mapped))
1771 printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
1774 spin_lock(&po->bind_lock);
1775 if (was_running && !po->running) {
1779 dev_add_pack(&po->prot_hook);
1781 spin_unlock(&po->bind_lock);
1786 free_pg_vec(pg_vec, order, req->tp_block_nr);
1791 static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1793 struct sock *sk = sock->sk;
1794 struct packet_sock *po = pkt_sk(sk);
1796 unsigned long start;
1803 size = vma->vm_end - vma->vm_start;
1806 if (po->pg_vec == NULL)
1808 if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
1811 start = vma->vm_start;
1812 for (i = 0; i < po->pg_vec_len; i++) {
1813 struct page *page = virt_to_page(po->pg_vec[i]);
1816 for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) {
1817 err = vm_insert_page(vma, start, page);
1823 atomic_inc(&po->mapped);
1824 vma->vm_ops = &packet_mmap_ops;
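/*
 * A hedged userspace sketch of the receive ring set up by packet_set_ring()
 * and mapped here (fd is an assumed AF_PACKET socket; the sizes satisfy the
 * sanity checks above: block size a multiple of PAGE_SIZE, frame size
 * TPACKET_ALIGNMENT-aligned and >= TPACKET_HDRLEN, and tp_frame_nr equal to
 * frames_per_block * tp_block_nr):
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 128,	// (4096 / 2048) * 64
 *	};
 *	char *ring;
 *	unsigned int i;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	for (i = 0; ; i = (i + 1) % req.tp_frame_nr) {
 *		struct tpacket_hdr *h =
 *			(struct tpacket_hdr *)(ring + i * req.tp_frame_size);
 *
 *		while (!(h->tp_status & TP_STATUS_USER))
 *			poll(&(struct pollfd){ .fd = fd, .events = POLLIN }, 1, -1);
 *		// packet data starts at (char *)h + h->tp_mac
 *		h->tp_status = TP_STATUS_KERNEL;	// hand the frame back
 *	}
 */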
1834 #ifdef CONFIG_SOCK_PACKET
1835 static const struct proto_ops packet_ops_spkt = {
1836 .family = PF_PACKET,
1837 .owner = THIS_MODULE,
1838 .release = packet_release,
1839 .bind = packet_bind_spkt,
1840 .connect = sock_no_connect,
1841 .socketpair = sock_no_socketpair,
1842 .accept = sock_no_accept,
1843 .getname = packet_getname_spkt,
1844 .poll = datagram_poll,
1845 .ioctl = packet_ioctl,
1846 .listen = sock_no_listen,
1847 .shutdown = sock_no_shutdown,
1848 .setsockopt = sock_no_setsockopt,
1849 .getsockopt = sock_no_getsockopt,
1850 .sendmsg = packet_sendmsg_spkt,
1851 .recvmsg = packet_recvmsg,
1852 .mmap = sock_no_mmap,
1853 .sendpage = sock_no_sendpage,
1858 struct proto_ops packet_ops = {
1859 .family = PF_PACKET,
1860 .owner = THIS_MODULE,
1861 .release = packet_release,
1862 .bind = packet_bind,
1863 .connect = sock_no_connect,
1864 .socketpair = sock_no_socketpair,
1865 .accept = sock_no_accept,
1866 .getname = packet_getname,
1867 .poll = packet_poll,
1868 .ioctl = packet_ioctl,
1869 .listen = sock_no_listen,
1870 .shutdown = sock_no_shutdown,
1871 .setsockopt = packet_setsockopt,
1872 .getsockopt = packet_getsockopt,
1873 .sendmsg = packet_sendmsg,
1874 .recvmsg = packet_recvmsg,
1875 .mmap = packet_mmap,
1876 .sendpage = sock_no_sendpage,
1880 struct net_proto_family packet_family_ops = {
1881 .family = PF_PACKET,
1882 .create = packet_create,
1883 .owner = THIS_MODULE,
1886 static struct notifier_block packet_netdev_notifier = {
1887 .notifier_call = packet_notifier,
1890 #ifdef CONFIG_PROC_FS
1891 static inline struct sock *packet_seq_idx(loff_t off)
1894 struct hlist_node *node;
1896 sk_for_each(s, node, &packet_sklist) {
1903 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
1905 read_lock(&packet_sklist_lock);
1906 return *pos ? packet_seq_idx(*pos - 1) : SEQ_START_TOKEN;
1909 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1912 return (v == SEQ_START_TOKEN)
1913 ? sk_head(&packet_sklist)
1914 : sk_next((struct sock*)v) ;
1917 static void packet_seq_stop(struct seq_file *seq, void *v)
1919 read_unlock(&packet_sklist_lock);
1922 static int packet_seq_show(struct seq_file *seq, void *v)
1924 if (v == SEQ_START_TOKEN)
1925 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
1928 const struct packet_sock *po = pkt_sk(s);
1931 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
1933 atomic_read(&s->sk_refcnt),
1938 atomic_read(&s->sk_rmem_alloc),
1946 static struct seq_operations packet_seq_ops = {
1947 .start = packet_seq_start,
1948 .next = packet_seq_next,
1949 .stop = packet_seq_stop,
1950 .show = packet_seq_show,
1953 static int packet_seq_open(struct inode *inode, struct file *file)
1955 return seq_open(file, &packet_seq_ops);
1958 static struct file_operations packet_seq_fops = {
1959 .owner = THIS_MODULE,
1960 .open = packet_seq_open,
1962 .llseek = seq_lseek,
1963 .release = seq_release,
1968 static void __exit packet_exit(void)
1970 proc_net_remove("packet");
1971 unregister_netdevice_notifier(&packet_netdev_notifier);
1972 sock_unregister(PF_PACKET);
1973 proto_unregister(&packet_proto);
1976 static int __init packet_init(void)
1978 int rc = proto_register(&packet_proto, 0);
1983 sock_register(&packet_family_ops);
1984 register_netdevice_notifier(&packet_netdev_notifier);
1985 proc_net_fops_create("packet", 0, &packet_seq_fops);
1990 module_init(packet_init);
1991 module_exit(packet_exit);
1992 MODULE_LICENSE("GPL");
1993 MODULE_ALIAS_NETPROTO(PF_PACKET);