/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					minimum space allocation
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/divert.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/err.h>
#include <linux/tcp.h>
#include <linux/udp.h>
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[16];	/* 16 way hashed list */
static struct list_head ptype_all;	/* Taps */
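
/*
 * Illustrative sketch (not part of the original file): protocols hash
 * into ptype_base[] by the low nibble of the host-order protocol value,
 * which is why RARP (0x8035), SNAP (0x0005) and X.25 (0x0805) all share
 * bucket 5 while common protocols stay apart:
 *
 *	static unsigned bucket_of(__be16 proto)
 *	{
 *		return ntohs(proto) & 15;	// same hash as dev_add_pack()
 *	}
 *
 *	bucket_of(htons(ETH_P_IP));	// 0x0800 & 15 == 0
 *	bucket_of(htons(ETH_P_ARP));	// 0x0806 & 15 == 6
 */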
/*
 * The @dev_base list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
struct net_device *dev_base;
static struct net_device **dev_tail = &dev_base;
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base);
EXPORT_SYMBOL(dev_base_lock);
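
/*
 * A minimal pure-reader sketch (illustration only, assuming the locking
 * rules above): walk dev_base under dev_base_lock without taking rtnl.
 *
 *	struct net_device *d;
 *
 *	read_lock(&dev_base_lock);
 *	for (d = dev_base; d; d = d->next)
 *		printk(KERN_DEBUG "%s\n", d->name);
 *	read_unlock(&dev_base_lock);
 */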
#define NETDEV_HASHBITS	8
static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];

static inline struct hlist_head *dev_name_hash(const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
}

static inline struct hlist_head *dev_index_hash(int ifindex)
{
	return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
}
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */
DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL };

#ifdef CONFIG_SYSFS
extern int netdev_sysfs_init(void);
extern int netdev_register_sysfs(struct net_device *);
extern void netdev_unregister_sysfs(struct net_device *);
#else
#define netdev_sysfs_init()		(0)
#define netdev_register_sysfs(dev)	(0)
#define netdev_unregister_sysfs(dev)	do { } while(0)
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and protocol handlers that
 *	merely inspect packets MUST start from the promiscuous ptype_all
 *	chain in net_bh. This is true now; do not change it.
 *	Explanation: if a handler that mangles packets were first in the
 *	list, it could not sense that the packet is cloned and should be
 *	copied-on-write, so it would modify the clone and subsequent
 *	readers would get a broken packet.
 */
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs currently in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL)) {
		list_add_rcu(&pt->list, &ptype_all);
	} else {
		hash = ntohs(pt->type) & 15;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & 15];

	list_for_each_entry(pt1, head, list) {
		if (pt1 == pt) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
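
/*
 * Hypothetical usage sketch (not in the original file): a module that
 * taps IPv4 frames by registering a packet_type for ETH_P_IP.  The
 * handler name and the consume-our-clone policy are illustrative.
 *
 *	static int my_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		// we were handed a clone; consume it
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_pt = {
 *		.type = __constant_htons(ETH_P_IP),
 *		.func = my_ipv4_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);		// begin receiving IPv4 frames
 *	...
 *	dev_remove_pack(&my_pt);	// sleeps until no CPU sees my_pt
 */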
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strcpy(s[i].name, name);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
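
/*
 * Example (illustrative, not from the original file): a kernel command
 * line entry consumed by netdev_boot_setup() above, pre-seeding IRQ 9,
 * I/O base 0x300 and a memory window for the device named "eth0":
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth0
 *
 * get_options() parses the leading integers into ints[] (irq,
 * base_addr, mem_start, mem_end) and leaves "eth0" in str as the name
 * handed to netdev_boot_setup_add().
 */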
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}
/**
 *	dev_get_by_name - find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
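
/*
 * Minimal usage sketch (illustration only): resolve a name to a held
 * reference and drop it with dev_put() when done.
 *
 *	struct net_device *dev = dev_get_by_name("eth0");
 *
 *	if (dev) {
 *		printk(KERN_INFO "%s has ifindex %d\n",
 *		       dev->name, dev->ifindex);
 *		dev_put(dev);		// release the reference we hold
 *	}
 */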
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for (dev = dev_base; dev; dev = dev->next)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			break;
	return dev;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *dev_getfirstbyhwtype(unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	for (dev = dev_base; dev; dev = dev->next) {
		if (dev->type == type) {
			dev_hold(dev);
			break;
		}
	}
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags - find any device with given flags
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for (dev = dev_base; dev != NULL; dev = dev->next) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return dev;
}
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.
 */
int dev_valid_name(const char *name)
{
	return !(*name == '\0'
		 || !strcmp(name, ".")
		 || !strcmp(name, "..")
		 || strchr(name, '/'));
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	int i = 0;
	char buf[IFNAMSIZ];
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for (d = dev_base; d; d = d->next) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, sizeof(buf), name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, sizeof(buf), name, i);
	if (!__dev_get_by_name(buf)) {
		strlcpy(dev->name, buf, IFNAMSIZ);
		return i;
	}

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
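
/*
 * Illustrative sketch (not in the original file): a driver asking for
 * the next free "eth%d" slot before registration.  With eth0 and eth1
 * already present this fills in "eth2" and returns 2.
 *
 *	rtnl_lock();
 *	err = dev_alloc_name(dev, "eth%d");	// fills dev->name on success
 *	if (err >= 0)
 *		printk(KERN_DEBUG "got unit %d: %s\n", err, dev->name);
 *	rtnl_unlock();
 */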
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	int err = 0;

	ASSERT_RTNL();

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

	err = class_device_rename(&dev->class_dev, dev->name);
	if (!err) {
		hlist_del(&dev->name_hlist);
		hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
		raw_notifier_call_chain(&netdev_chain,
				NETDEV_CHANGENAME, dev);
	}

	return err;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	raw_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		raw_notifier_call_chain(&netdev_chain,
				NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
/**
 *	dev_load - load a network module
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

static int default_rebuild_header(struct sk_buff *skb)
{
	printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
	       skb->dev ? skb->dev->name : "NULL!!!");
	kfree_skb(skb);
	return 1;
}
/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);
	if (dev->open) {
		ret = dev->open(dev);
		if (ret)
			clear_bit(__LINK_STATE_START, &dev->state);
	}

	/*
	 *	If it went open OK then:
	 */
	if (!ret) {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_mc_upload(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		raw_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return ret;
}
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	raw_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);

	dev_deactivate(dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running(),
	 * and wait when poll really will happen. Actually, the best place
	 * for this is inside dev->stop() after device stopped its irq
	 * engine, but this requires more changes in devices. */

	smp_mb__after_clear_bit(); /* Commit netif_running(). */
	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
		/* No hurry. */
		msleep(1);
	}

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	raw_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);

	return 0;
}
/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (!err) {
		for (dev = dev_base; dev; dev = dev->next) {
			nb->notifier_call(nb, NETDEV_REGISTER, dev);

			if (dev->flags & IFF_UP)
				nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}
	rtnl_unlock();
	return err;
}

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
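
/*
 * Hypothetical usage sketch (not in the original file): a subsystem
 * watching for interfaces coming up.  The names are illustrative.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);	// replays REGISTER/UP events
 *	...
 *	unregister_netdevice_notifier(&my_nb);
 */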
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@v:   pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&netdev_chain, val, v);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

void __net_timestamp(struct sk_buff *skb)
{
	struct timeval tv;

	do_gettimeofday(&tv);
	skb_set_timestamp(skb, &tv);
}
EXPORT_SYMBOL(__net_timestamp);

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else {
		skb->tstamp.off_sec = 0;
		skb->tstamp.off_usec = 0;
	}
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb2->mac.raw = skb2->data;

			if (skb2->nh.raw < skb2->data ||
			    skb2->nh.raw > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb2->nh.raw = skb2->data;
			}

			skb2->h.raw = skb2->nh.raw;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}
void __netif_schedule(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		unsigned long flags;
		struct softnet_data *sd;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		dev->next_sched = sd->output_queue;
		sd->output_queue = dev;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);

void __netif_rx_schedule(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	dev_hold(dev);
	list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
	if (dev->quota < 0)
		dev->quota += dev->weight;
	else
		dev->quota = dev->weight;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__netif_rx_schedule);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/* Hot-plugging. */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb, int inward)
{
	unsigned int csum;
	int ret = 0, offset = skb->h.raw - skb->data;

	if (inward) {
		skb->ip_summed = CHECKSUM_NONE;
		goto out;
	}

	if (skb_cloned(skb)) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	BUG_ON(offset > (int)skb->len);
	csum = skb_checksum(skb, offset, skb->len-offset, 0);

	offset = skb->tail - skb->h.raw;
	BUG_ON(offset <= 0);
	BUG_ON(skb->csum + 2 > offset);

	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	int type = skb->protocol;

	BUG_ON(skb_shinfo(skb)->frag_list);
	BUG_ON(skb->ip_summed != CHECKSUM_HW);

	skb->mac.raw = skb->data;
	skb->mac_len = skb->nh.raw - skb->data;
	__skb_pull(skb, skb->mac_len);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb->mac.raw);

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
#ifdef CONFIG_HIGHMEM
/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */
static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

	return 0;
}
#else
#define illegal_highdma(dev, skb)	(0)
#endif
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}
/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (unlikely(IS_ERR(segs)))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return dev->hard_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = dev->hard_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely(netif_queue_stopped(dev) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}
#define HARD_TX_LOCK(dev, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		netif_tx_lock(dev);			\
	}						\
}

#define HARD_TX_UNLOCK(dev) {				\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		netif_tx_unlock(dev);			\
	}						\
}

#ifdef CONFIG_XEN
inline int skb_checksum_setup(struct sk_buff *skb)
{
	if (skb->proto_csum_blank) {
		if (skb->protocol != htons(ETH_P_IP))
			goto out;
		skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
		if (skb->h.raw >= skb->tail)
			goto out;
		switch (skb->nh.iph->protocol) {
		case IPPROTO_TCP:
			skb->csum = offsetof(struct tcphdr, check);
			break;
		case IPPROTO_UDP:
			skb->csum = offsetof(struct udphdr, check);
			break;
		default:
			if (net_ratelimit())
				printk(KERN_ERR "Attempting to checksum a non-"
				       "TCP/UDP packet, dropping a protocol"
				       " %d packet", skb->nh.iph->protocol);
			goto out;
		}
		if ((skb->h.raw + skb->csum + 2) > skb->tail)
			goto out;
		skb->ip_summed = CHECKSUM_HW;
		skb->proto_csum_blank = 0;
	}
	return 0;
out:
	return -EPROTO;
}
#else
inline int skb_checksum_setup(struct sk_buff *skb) { return 0; }
#endif
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* If a checksum-deferred packet is forwarded to a device that needs a
	 * checksum, correct the pointers and force checksumming.
	 */
	if (skb_checksum_setup(skb))
		goto out_kfree_skb;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_HW &&
	    (!(dev->features & NETIF_F_GEN_CSUM) &&
	     (!(dev->features & NETIF_F_IP_CSUM) ||
	      skb->protocol != htons(ETH_P_IP))))
		if (skb_checksum_help(skb, 0))
			goto out_kfree_skb;

gso:
	spin_lock_prefetch(&dev->queue_lock);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	/* Updates of qdisc are serialized by queue_lock.
	 * The struct Qdisc which is pointed to by qdisc is now a
	 * rcu structure - it may be accessed without acquiring
	 * a lock (but the structure may be stale.) The freeing of the
	 * qdisc will be deferred until it's known that there are no
	 * more references to it.
	 *
	 * If the qdisc has an enqueue function, we still need to
	 * hold the queue_lock before calling it, since queue_lock
	 * also serializes access to the device queue.
	 */

	q = rcu_dereference(dev->qdisc);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
#endif
	if (q->enqueue) {
		/* Grab device queue */
		spin_lock(&dev->queue_lock);

		rc = q->enqueue(skb, q);

		qdisc_run(dev);

		spin_unlock(&dev->queue_lock);
		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone from deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (dev->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, cpu);

			if (!netif_queue_stopped(dev)) {
				rc = 0;
				if (!dev_hard_start_xmit(skb, dev)) {
					HARD_TX_UNLOCK(dev);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
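
/*
 * Illustrative caller sketch (not in the original file): transmitting a
 * pre-built frame.  The skb construction is elided; only the contract
 * matters - set skb->dev, call with IRQs enabled, and treat the skb as
 * consumed whatever the return value.
 *
 *	skb->dev = dev;
 *	rc = dev_queue_xmit(skb);	// <0 errno, or positive NET_XMIT_*
 *	if (rc)
 *		printk(KERN_DEBUG "xmit status %d\n", rc);
 *	// do NOT kfree_skb(skb) here: it has been consumed either way
 */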
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog = 1000;
int netdev_budget = 300;
int weight_p = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_CN_LOW   (low congestion)
 *	NET_RX_CN_MOD   (moderate congestion)
 *	NET_RX_CN_HIGH  (high congestion)
 *	NET_RX_DROP     (packet was dropped)
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.off_sec)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is the most
	 * short when CPU is congested, but is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			dev_hold(skb->dev);
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		netif_rx_schedule(&queue->backlog_dev);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);
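
/*
 * Illustrative driver-side sketch (not in the original file): a receive
 * interrupt handler hands a freshly filled skb to the stack.
 * eth_type_trans() sets skb->protocol and skb->pkt_type; netif_rx()
 * queues to the per-CPU backlog drained by process_backlog() below.
 *
 *	skb->dev = dev;
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);			// always "succeeds"; may still drop
 */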
static inline struct net_device *skb_bond(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (dev->master) {
		/*
		 * On bonding slaves other than the currently active
		 * slave, suppress duplicates except for 802.3ad
		 * ETH_P_SLOW and alb non-mcast/bcast.
		 */
		if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
			if (dev->master->priv_flags & IFF_MASTER_ALB) {
				if (skb->pkt_type != PACKET_BROADCAST &&
				    skb->pkt_type != PACKET_MULTICAST)
					goto keep;
			}

			if (dev->master->priv_flags & IFF_MASTER_8023AD &&
			    skb->protocol == __constant_htons(ETH_P_SLOW))
				goto keep;

			kfree_skb(skb);
			return NULL;
		}
keep:
		skb->dev = dev->master;
	}

	return dev;
}
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			BUG_TRAP(!atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct net_device *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct net_device *dev = head;
			head = head->next_sched;

			smp_mb__before_clear_bit();
			clear_bit(__LINK_STATE_SCHED, &dev->state);

			if (spin_trylock(&dev->queue_lock)) {
				qdisc_run(dev);
				spin_unlock(&dev->queue_lock);
			} else {
				netif_schedule(dev);
			}
		}
	}
}
static __inline__ int deliver_skb(struct sk_buff *skb,
				  struct packet_type *pt_prev,
				  struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb);

struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr);
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);

static __inline__ int handle_bridge(struct sk_buff **pskb,
				    struct packet_type **pt_prev, int *ret,
				    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if ((*pskb)->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference((*pskb)->dev->br_port)) == NULL)
		return 0;

	if (*pt_prev) {
		*ret = deliver_skb(*pskb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, pskb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(0)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * a compare and 2 stores extra right now if we don't have it on
 * but have CONFIG_NET_CLS_ACT
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
	struct Qdisc *q;
	struct net_device *dev = skb->dev;
	int result = TC_ACT_OK;

	if (dev->qdisc_ingress) {
		__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
		if (MAX_RED_LOOP < ttl++) {
			printk("Redir loop detected Dropping packet (%s->%s)\n",
			       skb->input_dev->name, skb->dev->name);
			return TC_ACT_SHOT;
		}

		skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl);

		skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);

		spin_lock(&dev->ingress_lock);
		if ((q = dev->qdisc_ingress) != NULL)
			result = q->enqueue(skb, q);
		spin_unlock(&dev->ingress_lock);
	}

	return result;
}
#endif
int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	int ret = NET_RX_DROP;
	unsigned short type;

	/* if we've gotten here through NAPI, check netpoll */
	if (skb->dev->poll && netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.off_sec)
		net_timestamp(skb);

	if (!skb->input_dev)
		skb->input_dev = skb->dev;

	orig_dev = skb_bond(skb);

	if (!orig_dev)
		return NET_RX_DROP;

	__get_cpu_var(netdev_rx_stat).total++;

	skb->h.raw = skb->nh.raw = skb->data;
	skb->mac_len = skb->nh.raw - skb->mac.raw;

	pt_prev = NULL;

	rcu_read_lock();

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	switch (skb->ip_summed) {
	case CHECKSUM_UNNECESSARY:
		skb->proto_data_valid = 1;
		break;
	case CHECKSUM_HW:
		/* XXX Implement me. */
	default:
		skb->proto_data_valid = 0;
		break;
	}

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	if (pt_prev) {
		ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = NULL; /* no one else should process this after */
	} else {
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	ret = ing_filter(skb);

	if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) {
		kfree_skb(skb);
		goto out;
	}

	skb->tc_verd = 0;
ncls:
#endif

	handle_diverter(skb);

	if (handle_bridge(&skb, &pt_prev, &ret, orig_dev))
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
		if (ptype->type == type &&
		    (!ptype->dev || ptype->dev == skb->dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}
static int process_backlog(struct net_device *backlog_dev, int *budget)
{
	int work = 0;
	int quota = min(backlog_dev->quota, *budget);
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;

	backlog_dev->weight = weight_p;
	for (;;) {
		struct sk_buff *skb;
		struct net_device *dev;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb)
			goto job_done;
		local_irq_enable();

		dev = skb->dev;

		netif_receive_skb(skb);

		dev_put(dev);

		work++;

		if (work >= quota || jiffies - start_time > 1)
			break;
	}

	backlog_dev->quota -= work;
	*budget -= work;
	return -1;

job_done:
	backlog_dev->quota -= work;
	*budget -= work;

	list_del(&backlog_dev->poll_list);
	smp_mb__before_clear_bit();
	netif_poll_enable(backlog_dev);

	local_irq_enable();
	return 0;
}
static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(&queue->poll_list)) {
		struct net_device *dev;

		if (budget <= 0 || jiffies - start_time > 1)
			goto softnet_break;

		local_irq_enable();

		dev = list_entry(queue->poll_list.next,
				 struct net_device, poll_list);
		have = netpoll_poll_lock(dev);

		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
			netpoll_poll_unlock(have);
			local_irq_disable();
			list_move_tail(&dev->poll_list, &queue->poll_list);
			if (dev->quota < 0)
				dev->quota += dev->weight;
			else
				dev->quota = dev->weight;
		} else {
			netpoll_poll_unlock(have);
			dev_put(dev);
			local_irq_disable();
		}
	}
out:
	local_irq_enable();
	return;

softnet_break:
	__get_cpu_var(netdev_rx_stat).time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}
static gifconf_func_t * gifconf_list [NPROTO];

/**
 *	register_gifconf	-	register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}
/*
 *	Map an interface index to its name (SIOCGIFNAME)
 */

/*
 *	We need this ioctl for efficient implementation of the
 *	if_indextoname() function required by the IPv6 API.  Without
 *	it, we would have to search all the interfaces to find a
 *	match.
 */

static int dev_ifname(struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifr.ifr_ifindex);
	if (!dev) {
		read_unlock(&dev_base_lock);
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	read_unlock(&dev_base_lock);

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}
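
/*
 * Userspace view (illustrative, not from the original file): the ioctl
 * served by dev_ifname() above, which is roughly what glibc's
 * if_indextoname() does internally.
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_ifindex = 1;
 *	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *		printf("ifindex 1 is %s\n", ifr.ifr_name);
 */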
/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size eventually, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 *	Loop over the interfaces, and write an info block for each.
	 */

	total = 0;
	for (dev = dev_base; dev; dev = dev->next) {
		if (vx_flags(VXF_HIDE_NETIF, 0) &&
			!dev_in_nx_info(dev, current->nx_info))
			continue;
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
	}

	/*
	 *	All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 * 	Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}
#ifdef CONFIG_PROC_FS
/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */
static __inline__ struct net_device *dev_get_idx(loff_t pos)
{
	struct net_device *dev;
	loff_t i;

	for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next);

	return i == pos ? dev : NULL;
}

void *dev_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&dev_base_lock);
	return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
}

void dev_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&dev_base_lock);
}
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct nx_info *nxi = current->nx_info;

	if (vx_flags(VXF_HIDE_NETIF, 0) && !dev_in_nx_info(dev, nxi))
		return;
	if (dev->get_stats) {
		struct net_device_stats *stats = dev->get_stats(dev);

		seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
				"%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
			   dev->name, stats->rx_bytes, stats->rx_packets,
			   stats->rx_errors,
			   stats->rx_dropped + stats->rx_missed_errors,
			   stats->rx_fifo_errors,
			   stats->rx_length_errors + stats->rx_over_errors +
			     stats->rx_crc_errors + stats->rx_frame_errors,
			   stats->rx_compressed, stats->multicast,
			   stats->tx_bytes, stats->tx_packets,
			   stats->tx_errors, stats->tx_dropped,
			   stats->tx_fifo_errors, stats->collisions,
			   stats->tx_carrier_errors +
			     stats->tx_aborted_errors +
			     stats->tx_window_errors +
			     stats->tx_heartbeat_errors,
			   stats->tx_compressed);
	} else
		seq_printf(seq, "%6s: No statistics available.\n", dev->name);
}
/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}
static struct netif_rx_stats *softnet_get_online(loff_t *pos)
{
	struct netif_rx_stats *rc = NULL;

	while (*pos < NR_CPUS)
		if (cpu_online(*pos)) {
			rc = &per_cpu(netdev_rx_stat, *pos);
			break;
		} else
			++*pos;
	return rc;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct netif_rx_stats *s = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   s->total, s->dropped, s->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   s->cpu_collision);
	return 0;
}
static struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}

static struct file_operations dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = dev_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

static struct file_operations softnet_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = softnet_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
#ifdef CONFIG_WIRELESS_EXT
extern int wireless_proc_init(void);
#else
#define wireless_proc_init() 0
#endif

static int __init dev_proc_init(void)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
		goto out_dev;
	if (wireless_proc_init())
		goto out_softnet;
	rc = 0;
out:
	return rc;
out_softnet:
	proc_net_remove("softnet_stat");
out_dev:
	proc_net_remove("dev");
	goto out;
}
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */
/**
 *	netdev_set_master	-	set up master/slave pair
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success the reference counts
 *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
 *	function returns zero.
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;

	synchronize_net();

	if (old)
		dev_put(old);

	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}
/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 */
void dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	if ((dev->promiscuity += inc) == 0)
		dev->flags &= ~IFF_PROMISC;
	else
		dev->flags |= IFF_PROMISC;
	if (dev->flags != old_flags) {
		dev_mc_upload(dev);
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
							       "left");
		audit_log(current->audit_context, GFP_ATOMIC,
			AUDIT_ANOM_PROMISCUOUS,
			"dev=%s prom=%d old_prom=%d auid=%u",
			dev->name, (dev->flags & IFF_PROMISC),
			(old_flags & IFF_PROMISC),
			audit_get_loginuid(current->audit_context));
	}
}
/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all multicast frames. Once it hits zero the device reverts back to
 *	normal filtering operation. A negative @inc value is used to drop the
 *	counter when releasing a resource needing all multicasts.
 */

void dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	dev->flags |= IFF_ALLMULTI;
	if ((dev->allmulti += inc) == 0)
		dev->flags &= ~IFF_ALLMULTI;
	if (dev->flags ^ old_flags)
		dev_mc_upload(dev);
}
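
/*
 * Illustrative sketch (not in the original file): a packet-capture
 * style user takes and later releases one promiscuity reference; the
 * device only leaves promiscuous mode once every holder has dropped it.
 *
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, 1);	// count 0 -> 1: IFF_PROMISC set
 *	...
 *	dev_set_promiscuity(dev, -1);	// count 1 -> 0: IFF_PROMISC cleared
 *	rtnl_unlock();
 */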
unsigned dev_get_flags(const struct net_device *dev)
{
	unsigned flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
int dev_change_flags(struct net_device *dev, unsigned flags)
{
	int ret;
	int old_flags = dev->flags;

	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

	dev_mc_upload(dev);

	/*
	 *	Have we downed the interface. We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);

		if (!ret)
			dev_mc_upload(dev);
	}

	if (dev->flags & IFF_UP &&
	    ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
					  IFF_VOLATILE)))
		raw_notifier_call_chain(&netdev_chain,
				NETDEV_CHANGE, dev);

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? +1 : -1;
		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	   is important. Some (broken) drivers set IFF_PROMISC, when
	   IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

	if (old_flags ^ dev->flags)
		rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);

	return ret;
}
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/*	MTU must be positive.	 */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
	if (dev->change_mtu)
		err = dev->change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;
	if (!err && dev->flags & IFF_UP)
		raw_notifier_call_chain(&netdev_chain,
				NETDEV_CHANGEMTU, dev);
	return err;
}
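
/*
 * Illustrative sketch (not in the original file): the RTNL-held path a
 * configuration tool effectively takes to change an interface's MTU.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);	// driver may veto via change_mtu()
 *	rtnl_unlock();
 *	if (err)
 *		printk(KERN_WARNING "MTU change failed: %d\n", err);
 */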
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	int err;

	if (!dev->set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = dev->set_mac_address(dev, sa);
	if (!err)
		raw_notifier_call_chain(&netdev_chain,
				NETDEV_CHANGEADDR, dev);
	return err;
}
/*
 *	Perform the SIOCxIFxxx calls.
 */
static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
		case SIOCGIFFLAGS:	/* Get interface flags */
			ifr->ifr_flags = dev_get_flags(dev);
			return 0;

		case SIOCSIFFLAGS:	/* Set interface flags */
			return dev_change_flags(dev, ifr->ifr_flags);

		case SIOCGIFMETRIC:	/* Get the metric on the interface
					   (currently unused) */
			ifr->ifr_metric = 0;
			return 0;

		case SIOCSIFMETRIC:	/* Set the metric on the interface
					   (currently unused) */
			return -EOPNOTSUPP;

		case SIOCGIFMTU:	/* Get the MTU of a device */
			ifr->ifr_mtu = dev->mtu;
			return 0;

		case SIOCSIFMTU:	/* Set the MTU of a device */
			return dev_set_mtu(dev, ifr->ifr_mtu);

		case SIOCGIFHWADDR:
			if (!dev->addr_len)
				memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
			else
				memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
				       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
			ifr->ifr_hwaddr.sa_family = dev->type;
			return 0;

		case SIOCSIFHWADDR:
			return dev_set_mac_address(dev, &ifr->ifr_hwaddr);

		case SIOCSIFHWBROADCAST:
			if (ifr->ifr_hwaddr.sa_family != dev->type)
				return -EINVAL;
			memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
			raw_notifier_call_chain(&netdev_chain,
					    NETDEV_CHANGEADDR, dev);
			return 0;

		case SIOCGIFMAP:
			ifr->ifr_map.mem_start = dev->mem_start;
			ifr->ifr_map.mem_end   = dev->mem_end;
			ifr->ifr_map.base_addr = dev->base_addr;
			ifr->ifr_map.irq       = dev->irq;
			ifr->ifr_map.dma       = dev->dma;
			ifr->ifr_map.port      = dev->if_port;
			return 0;

		case SIOCSIFMAP:
			if (dev->set_config) {
				if (!netif_device_present(dev))
					return -ENODEV;
				return dev->set_config(dev, &ifr->ifr_map);
			}
			return -EOPNOTSUPP;

		case SIOCADDMULTI:
			if (!dev->set_multicast_list ||
			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
					  dev->addr_len, 1);

		case SIOCDELMULTI:
			if (!dev->set_multicast_list ||
			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
					     dev->addr_len, 1);

		case SIOCGIFINDEX:
			ifr->ifr_ifindex = dev->ifindex;
			return 0;

		case SIOCGIFTXQLEN:
			ifr->ifr_qlen = dev->tx_queue_len;
			return 0;

		case SIOCSIFTXQLEN:
			if (ifr->ifr_qlen < 0)
				return -EINVAL;
			dev->tx_queue_len = ifr->ifr_qlen;
			return 0;

		case SIOCSIFNAME:
			ifr->ifr_newname[IFNAMSIZ-1] = '\0';
			return dev_change_name(dev, ifr->ifr_newname);

		/*
		 *	Unknown or private ioctl
		 */

		default:
			if ((cmd >= SIOCDEVPRIVATE &&
			    cmd <= SIOCDEVPRIVATE + 15) ||
			    cmd == SIOCBONDENSLAVE ||
			    cmd == SIOCBONDRELEASE ||
			    cmd == SIOCBONDSETHWADDR ||
			    cmd == SIOCBONDSLAVEINFOQUERY ||
			    cmd == SIOCBONDINFOQUERY ||
			    cmd == SIOCBONDCHANGEACTIVE ||
			    cmd == SIOCGMIIPHY ||
			    cmd == SIOCGMIIREG ||
			    cmd == SIOCSMIIREG ||
			    cmd == SIOCBRADDIF ||
			    cmd == SIOCBRDELIF ||
			    cmd == SIOCWANDEV) {
				err = -EOPNOTSUPP;
				if (dev->do_ioctl) {
					if (netif_device_present(dev))
						err = dev->do_ioctl(dev, ifr,
								    cmd);
					else
						err = -ENODEV;
				}
			} else
				err = -EINVAL;

	}
	return err;
}
/*
 *	This function handles all "interface"-type I/O control requests. The
 *	actual 'doing' part of this is dev_ifsioc above.
 */

/**
 *	dev_ioctl	-	network device ioctl
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes. The return value is the return from the syscall if
 *	positive or a negative errno code on error.
 */
int dev_ioctl(unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes ifconf argument
	   and requires shared lock, because it sleeps
	   writing to user space.
	 */

	if (cmd == SIOCGIFCONF) {
		rtnl_lock();
		ret = dev_ifconf((char __user *) arg);
		rtnl_unlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname((struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */

	switch (cmd) {
		/*
		 *	These ioctl calls:
		 *	- can be done by all.
		 *	- atomic and do not require locking.
		 *	- return a value
		 */
		case SIOCGIFFLAGS:
		case SIOCGIFMETRIC:
		case SIOCGIFMTU:
		case SIOCGIFHWADDR:
		case SIOCGIFSLAVE:
		case SIOCGIFMAP:
		case SIOCGIFINDEX:
		case SIOCGIFTXQLEN:
			dev_load(ifr.ifr_name);
			read_lock(&dev_base_lock);
			ret = dev_ifsioc(&ifr, cmd);
			read_unlock(&dev_base_lock);
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;

		case SIOCETHTOOL:
			dev_load(ifr.ifr_name);
			rtnl_lock();
			ret = dev_ethtool(&ifr);
			rtnl_unlock();
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;

		/*
		 *	These ioctl calls:
		 *	- require superuser power.
		 *	- require strict serialization.
		 *	- return a value
		 */
		case SIOCGMIIPHY:
		case SIOCGMIIREG:
		case SIOCSIFNAME:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			dev_load(ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(&ifr, cmd);
			rtnl_unlock();
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;

		/*
		 *	These ioctl calls:
		 *	- require superuser power.
		 *	- require strict serialization.
		 *	- do not return a value
		 */
		case SIOCSIFFLAGS:
		case SIOCSIFMETRIC:
		case SIOCSIFMTU:
		case SIOCSIFMAP:
		case SIOCSIFHWADDR:
		case SIOCSIFSLAVE:
		case SIOCADDMULTI:
		case SIOCDELMULTI:
		case SIOCSIFHWBROADCAST:
		case SIOCSIFTXQLEN:
		case SIOCSMIIREG:
		case SIOCBONDENSLAVE:
		case SIOCBONDRELEASE:
		case SIOCBONDSETHWADDR:
		case SIOCBONDCHANGEACTIVE:
		case SIOCBRADDIF:
		case SIOCBRDELIF:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			/* fall through */
		case SIOCBONDSLAVEINFOQUERY:
		case SIOCBONDINFOQUERY:
			dev_load(ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(&ifr, cmd);
			rtnl_unlock();
			return ret;

		case SIOCGIFMEM:
			/* Get the per device memory space. We can add this but
			 * currently do not support it */
		case SIOCSIFMEM:
			/* Set the per device memory buffer space.
			 * Not applicable in our case */
		case SIOCSIFLINK:
			return -EINVAL;

		/*
		 *	Unknown or private ioctl.
		 */
		default:
			if (cmd == SIOCWANDEV ||
			    (cmd >= SIOCDEVPRIVATE &&
			     cmd <= SIOCDEVPRIVATE + 15)) {
				dev_load(ifr.ifr_name);
				rtnl_lock();
				ret = dev_ifsioc(&ifr, cmd);
				rtnl_unlock();
				if (!ret && copy_to_user(arg, &ifr,
							 sizeof(struct ifreq)))
					ret = -EFAULT;
				return ret;
			}
#ifdef CONFIG_WIRELESS_EXT
			/* Take care of Wireless Extensions */
			if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
				/* If command is `set a parameter', or
				 * `get the encoding parameters', check if
				 * the user has the right to do it */
				if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE
				    || cmd == SIOCGIWENCODEEXT) {
					if (!capable(CAP_NET_ADMIN))
						return -EPERM;
				}
				dev_load(ifr.ifr_name);
				rtnl_lock();
				/* Follow me in net/core/wireless.c */
				ret = wireless_process_ioctl(&ifr, cmd);
				rtnl_unlock();
				if (IW_IS_GET(cmd) &&
				    copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
				return ret;
			}
#endif	/* CONFIG_WIRELESS_EXT */
			return -EINVAL;
	}
}
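
/*
 * Driver-side sketch (hypothetical driver, not from this file): the
 * dev->do_ioctl hook that dev_ifsioc() falls back to for SIOCDEVPRIVATE
 * and the MII/bonding commands listed above. It runs with the RTNL
 * semaphore held on the set paths.
 */
static int example_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCDEVPRIVATE:
		ifr->ifr_ifru.ifru_ivalue = 42;	/* report some private state */
		return 0;
	default:
		return -EOPNOTSUPP;	/* everything else is unsupported */
	}
}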
/**
 *	dev_new_index	-	allocate an ifindex
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(void)
{
	static int ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(ifindex))
			return ifindex;
	}
}
static int dev_boot_phase = 1;

/* Delayed registration/unregistration */
static DEFINE_SPINLOCK(net_todo_list_lock);
static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);

static inline void net_set_todo(struct net_device *dev)
{
	spin_lock(&net_todo_list_lock);
	list_add_tail(&dev->todo_list, &net_todo_list);
	spin_unlock(&net_todo_list_lock);
}
/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */
int register_netdevice(struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *p;
	int ret;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);

	spin_lock_init(&dev->queue_lock);
	spin_lock_init(&dev->_xmit_lock);
	dev->xmit_lock_owner = -1;
#ifdef CONFIG_NET_CLS_ACT
	spin_lock_init(&dev->ingress_lock);
#endif

	ret = alloc_divert_blk(dev);
	if (ret)
		goto out;

	dev->iflink = -1;

	/* Init, if this function is available */
	if (dev->init) {
		ret = dev->init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out_err;
		}
	}

	if (!dev_valid_name(dev->name)) {
		ret = -EINVAL;
		goto out_err;
	}

	dev->ifindex = dev_new_index();
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Check for existence of name */
	head = dev_name_hash(dev->name);
	hlist_for_each(p, head) {
		struct net_device *d
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
			ret = -EEXIST;
			goto out_err;
		}
	}

	/* Fix illegal SG+CSUM combinations. */
	if ((dev->features & NETIF_F_SG) &&
	    !(dev->features & NETIF_F_ALL_CSUM)) {
		printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
		       dev->name);
		dev->features &= ~NETIF_F_SG;
	}

	/* TSO requires that SG is present as well. */
	if ((dev->features & NETIF_F_TSO) &&
	    !(dev->features & NETIF_F_SG)) {
		printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
		       dev->name);
		dev->features &= ~NETIF_F_TSO;
	}
	if (dev->features & NETIF_F_UFO) {
		if (!(dev->features & NETIF_F_HW_CSUM)) {
			printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
					"NETIF_F_HW_CSUM feature.\n",
			       dev->name);
			dev->features &= ~NETIF_F_UFO;
		}
		if (!(dev->features & NETIF_F_SG)) {
			printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
					"NETIF_F_SG feature.\n",
			       dev->name);
			dev->features &= ~NETIF_F_UFO;
		}
	}

	/*
	 *	nil rebuild_header routine,
	 *	that should be never called and used as just bug trap.
	 */

	if (!dev->rebuild_header)
		dev->rebuild_header = default_rebuild_header;

	ret = netdev_register_sysfs(dev);
	if (ret)
		goto out_err;
	dev->reg_state = NETREG_REGISTERED;

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev->next = NULL;
	dev_init_scheduler(dev);
	write_lock_bh(&dev_base_lock);
	*dev_tail = dev;
	dev_tail = &dev->next;
	hlist_add_head(&dev->name_hlist, head);
	hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
	dev_hold(dev);
	write_unlock_bh(&dev_base_lock);

	/* Notify protocols, that a new device appeared. */
	raw_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);

	ret = 0;

out:
	return ret;
out_err:
	free_divert_blk(dev);
	goto out;
}
/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl
 *	semaphore and expands the device name if you passed a format string
 *	to alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();

	/*
	 * If the name is a format string the caller wants us to do a
	 * name allocation.
	 */
	if (strchr(dev->name, '%')) {
		err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			goto out;
	}

	/*
	 * Back compatibility hook. Kill this one in 2.5
	 */
	if (dev->name[0] == 0 || dev->name[0] == ' ') {
		err = dev_alloc_name(dev, "eth%d");
		if (err < 0)
			goto out;
	}

	err = register_netdevice(dev);
out:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
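
/*
 * Registration sketch (hypothetical driver, not from this file): the usual
 * alloc_netdev()/register_netdev() pairing. "ex%d" asks the core to pick
 * the first free name; struct example_priv stands in for whatever
 * per-device state a real driver keeps.
 */
struct example_priv {
	struct net_device_stats stats;
};

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);		/* a real driver queues to hardware */
	return 0;
}

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);		/* Ethernet defaults: type, MTU, ... */
	dev->hard_start_xmit = example_xmit;
}

static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct example_priv), "ex%d", example_setup);
	if (!dev)
		return -ENOMEM;
	err = register_netdev(dev);	/* takes rtnl, resolves "ex%d" */
	if (err)
		free_netdev(dev);	/* safe: state is still NETREG_UNINITIALIZED */
	return err;
}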
/**
 *	netdev_wait_allrefs - wait until all references are gone.
 *
 *	This is called when unregistering network devices.
 *
 *	Any protocol or device that holds a reference should register
 *	for netdevice notification, and cleanup and put back the
 *	reference if they receive an UNREGISTER event.
 *	We can get stuck here if buggy protocols don't correctly
 *	call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;

	rebroadcast_time = warning_time = jiffies;
	while (atomic_read(&dev->refcnt) != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			raw_notifier_call_chain(&netdev_chain,
					    NETDEV_UNREGISTER, dev);

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			printk(KERN_EMERG "unregister_netdevice: "
			       "waiting for %s to become free. Usage "
			       "count = %d\n",
			       dev->name, atomic_read(&dev->refcnt));
			warning_time = jiffies;
		}
	}
}
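
/*
 * Sketch of the contract described above (illustrative, not from this
 * file): a hypothetical protocol that holds device references via
 * dev_hold() registers a notifier so it can release them on
 * NETDEV_UNREGISTER, letting netdev_wait_allrefs() terminate.
 */
static int example_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER) {
		/* forget any cached pointers to dev, then ... */
		dev_put(dev);		/* ... return the reference */
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_netdev_event,
};
/* registered once with register_netdevice_notifier(&example_notifier) */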
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock() after it drops the semaphore.
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 */
static DEFINE_MUTEX(net_todo_run_mutex);
void netdev_run_todo(void)
{
	struct list_head list = LIST_HEAD_INIT(list);

	/* Need to guard against multiple cpu's getting out of order. */
	mutex_lock(&net_todo_run_mutex);

	/* Not safe to do outside the semaphore.  We must not return
	 * until all unregister events invoked by the local processor
	 * have been completed (either by this todo run, or one on
	 * another cpu).
	 */
	if (list_empty(&net_todo_list))
		goto out;

	/* Snapshot list, allow later requests */
	spin_lock(&net_todo_list_lock);
	list_splice_init(&net_todo_list, &list);
	spin_unlock(&net_todo_list_lock);

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_entry(list.next, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			printk(KERN_ERR "network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		netdev_unregister_sysfs(dev);
		dev->reg_state = NETREG_UNREGISTERED;

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(atomic_read(&dev->refcnt));
		BUG_TRAP(!dev->ip_ptr);
		BUG_TRAP(!dev->ip6_ptr);
		BUG_TRAP(!dev->dn_ptr);

		/* It must be the very last action,
		 * after this 'dev' may point to freed up memory.
		 */
		if (dev->destructor)
			dev->destructor(dev);
	}

out:
	mutex_unlock(&net_todo_run_mutex);
}
/**
 *	alloc_netdev - allocate network device
 *	@sizeof_priv:	size of private data to allocate space for
 *	@name:		device name format string
 *	@setup:		callback to initialize device
 *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization.
 */
struct net_device *alloc_netdev(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *))
{
	void *p;
	struct net_device *dev;
	int alloc_size;

	/* ensure 32-byte alignment of both the device and private area */
	alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
	alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
		return NULL;
	}

	dev = (struct net_device *)
		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
	dev->padded = (char *)dev - (char *)p;

	if (sizeof_priv)
		dev->priv = netdev_priv(dev);

	setup(dev);
	strcpy(dev->name, name);
	return dev;
}
EXPORT_SYMBOL(alloc_netdev);
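
/*
 * Worked example of the alignment math above (illustrative numbers,
 * assuming NETDEV_ALIGN_CONST == 31, i.e. 32-byte alignment): with
 * sizeof(*dev) == 1500 and sizeof_priv == 100,
 *
 *	(1500 + 31) & ~31 = 1504	(device part, rounded up)
 *	1504 + 100 + 31   = 1635	(total allocation)
 *
 * Rounding the returned pointer up by at most 31 bytes then yields a
 * 32-byte-aligned net_device with the private area directly behind it;
 * dev->padded records the offset so free_netdev() can find the true
 * start of the allocation.
 */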
/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
#ifdef CONFIG_SYSFS
	/*  Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via class release */
	class_device_put(&dev->class_dev);
#else
	kfree((char *)dev - dev->padded);
#endif
}
/* Synchronize with packet receive processing. */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}
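
/*
 * Usage sketch (illustrative, not from this file): a writer removing a
 * hypothetical handler that the receive path may still be traversing
 * under rcu_read_lock() waits for all such readers before freeing it.
 */
struct example_handler {
	struct list_head list;
	/* ... */
};

static void example_remove_handler(struct example_handler *h)
{
	list_del_rcu(&h->list);	/* unpublish: no new readers can find it */
	synchronize_net();	/* wait out packet-processing readers */
	kfree(h);		/* now no CPU can still reference it */
}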
/**
 *	unregister_netdevice - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables. On success 0 is returned, on a failure
 *	a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	unregister_netdev() instead of this.
 */
int unregister_netdevice(struct net_device *dev)
{
	struct net_device *d, **dp;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	/* Some devices call without registering for initialization unwind. */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
				  "was registered\n", dev->name, dev);
		return -ENODEV;
	}

	BUG_ON(dev->reg_state != NETREG_REGISTERED);

	/* If device is running, close it first. */
	if (dev->flags & IFF_UP)
		dev_close(dev);

	/* And unlink it from device chain. */
	for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
		if (d == dev) {
			write_lock_bh(&dev_base_lock);
			hlist_del(&dev->name_hlist);
			hlist_del(&dev->index_hlist);
			if (dev_tail == &dev->next)
				dev_tail = dp;
			*dp = d->next;
			write_unlock_bh(&dev_base_lock);
			break;
		}
	}
	if (!d) {
		printk(KERN_ERR "unregister net_device: '%s' not found\n",
		       dev->name);
		return -ENODEV;
	}

	dev->reg_state = NETREG_UNREGISTERING;

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.
	*/
	raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);

	/*
	 *	Flush the multicast chain
	 */
	dev_mc_discard(dev);

	if (dev->uninit)
		dev->uninit(dev);

	/* Notifier chain MUST detach us from master device. */
	BUG_TRAP(!dev->master);

	free_divert_blk(dev);

	/* Finish processing unregister after unlock */
	net_set_todo(dev);

	synchronize_net();

	dev_put(dev);
	return 0;
}
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
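
/*
 * Teardown sketch (hypothetical driver, not from this file): module exit
 * mirrors the registration example above. unregister_netdev() blocks via
 * netdev_run_todo()/netdev_wait_allrefs() until every reference is gone,
 * so the subsequent free_netdev() is safe.
 */
static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);	/* takes rtnl, waits for refcnt to hit zero */
	free_netdev(dev);	/* releases the class device and the memory */
}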
#ifdef CONFIG_HOTPLUG_CPU
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct net_device **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_sysfs_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
		INIT_HLIST_HEAD(&dev_name_head[i]);

	for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
		INIT_HLIST_HEAD(&dev_index_head[i]);

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);
		set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
		queue->backlog_dev.weight = weight_p;
		queue->backlog_dev.poll = process_backlog;
		atomic_set(&queue->backlog_dev.refcnt, 1);
	}

	dev_boot_phase = 0;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);
EXPORT_SYMBOL(skb_checksum_setup);
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
EXPORT_SYMBOL(br_fdb_get_hook);
EXPORT_SYMBOL(br_fdb_put_hook);
#endif

#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif

EXPORT_PER_CPU_SYMBOL(softnet_data);