 *	NET3	Protocol independent device support routines.
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *			Florian la Roche <rzsfl@rz.uni-sb.de>
 *			Alan Cox <gw4pts@gw4pts.ampr.org>
 *			David Hinds <dahinds@users.sourceforge.net>
 *			Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *			Adam Sulmicki <adam@cfar.umd.edu>
 *			Pekka Riikonen <priikone@poesidon.pspt.fi>
 *	D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *				to 2 if register_netdev gets called
 *				before net_dev_init & also removed a
 *				few lines of code in the process.
 *	Alan Cox	:	device private ioctl copies fields back.
 *	Alan Cox	:	Transmit queue code does relevant
 *				stunts to keep the queue safe.
 *	Alan Cox	:	Fixed double lock.
 *	Alan Cox	:	Fixed promisc NULL pointer trap
 *	????????	:	Support the full private ioctl range
 *	Alan Cox	:	Moved ioctl permission check into
 *	Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *	Alan Cox	:	100 backlog just doesn't cut it when
 *				you start doing multicast video 8)
 *	Alan Cox	:	Rewrote net_bh and list manager.
 *	Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *	Alan Cox	:	Took out transmit every packet pass
 *				Saved a few bytes in the ioctl handler
 *	Alan Cox	:	Network driver sets packet type before
 *				calling netif_rx. Saves a function
 *	Alan Cox	:	Hashed net_bh()
 *	Richard Kooijman:	Timestamp fixes.
 *	Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *	Alan Cox	:	Device lock protection.
 *	Alan Cox	:	Fixed nasty side effect of device close
 *	Rudi Cilibrasi	:	Pass the right thing to
 *	Dave Miller	:	32bit quantity for the device lock to
 *				make it work out on a Sparc.
 *	Bjorn Ekwall	:	Added KERNELD hack.
 *	Alan Cox	:	Cleaned up the backlog initialise.
 *	Craig Metz	:	SIOCGIFCONF fix if space for under
 *	Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *				is no device open function.
 *	Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *	Cyrus Durgin	:	Cleaned for KMOD
 *	Adam Sulmicki	:	Bug Fix : Network Device Unload
 *				A network device unload needs to purge
 *	Paul Rusty Russell :	SIOCSIFNAME
 *	Pekka Riikonen	:	Netdev boot-time settings code
 *	Andrew Morton	:	Make unregister_netdevice wait
 *				indefinitely on dev->refcnt
 *	J Hadi Salim	:	- Backlog queue sampling
 *				- netif_rx() feedback
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/divert.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#ifdef CONFIG_NET_RADIO
#include <linux/wireless.h>		/* Note : will define WIRELESS_EXT */
#include <net/iw_handler.h>
#endif	/* CONFIG_NET_RADIO */
#include <asm/current.h>
/* This define, if set, will randomly drop a packet when congestion
 * is more than moderate.  It helps fairness in the multi-interface
 * case when one of them is a hog, but it kills performance for the
 * single interface case so it is off now by default.
/* Setting this will sample the queue lengths and thus congestion
 * via a timer instead of as each packet is received.
#undef OFFLINE_SAMPLE
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *	Why 16? Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *		sure which should go first, but I bet it won't make much
 *		difference if we are running VLANs.  The good news is that
 *		this protocol won't be in the list unless compiled in, so
 *		the average user (w/out VLANs) will not be adversely affected.
static spinlock_t ptype_lock = SPIN_LOCK_UNLOCKED;
static struct list_head ptype_base[16];	/* 16 way hashed list */
static struct list_head ptype_all;	/* Taps */
#ifdef OFFLINE_SAMPLE
static void sample_queue(unsigned long dummy);
static struct timer_list samp_timer = TIMER_INITIALIZER(sample_queue, 0, 0);
 * The @dev_base list is protected by @dev_base_lock and the rtnl
 * Pure readers hold dev_base_lock for reading.
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
struct net_device *dev_base;
struct net_device **dev_tail = &dev_base;
rwlock_t dev_base_lock = RW_LOCK_UNLOCKED;
EXPORT_SYMBOL(dev_base);
EXPORT_SYMBOL(dev_base_lock);
#define NETDEV_HASHBITS	8
static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];
static inline struct hlist_head *dev_name_hash(const char *name)
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
static inline struct hlist_head *dev_index_hash(int ifindex)
	return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
static struct notifier_block *netdev_chain;
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, };
#ifdef CONFIG_NET_FASTROUTE
int netdev_fastroute;
int netdev_fastroute_obstacles;
extern int netdev_sysfs_init(void);
extern int netdev_register_sysfs(struct net_device *);
extern int netdev_unregister_sysfs(struct net_device *);
/*******************************************************************************
		Protocol management and registration routines
*******************************************************************************/
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet is
 *	cloned and should be copied-on-write; it would modify the packet
 *	and subsequent readers would see a broken packet.
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
void dev_add_pack(struct packet_type *pt)
	spin_lock_bh(&ptype_lock);
#ifdef CONFIG_NET_FASTROUTE
	if (pt->af_packet_priv) {
		netdev_fastroute_obstacles++;
		dev_clear_fastroute(pt->dev);
	if (pt->type == htons(ETH_P_ALL)) {
		list_add_rcu(&pt->list, &ptype_all);
		hash = ntohs(pt->type) & 15;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	spin_unlock_bh(&ptype_lock);
extern void linkwatch_run_queue(void);
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
void __dev_remove_pack(struct packet_type *pt)
	struct list_head *head;
	struct packet_type *pt1;
	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL)) {
		head = &ptype_base[ntohs(pt->type) & 15];
	list_for_each_entry(pt1, head, list) {
#ifdef CONFIG_NET_FASTROUTE
			if (pt->af_packet_priv)
				netdev_fastroute_obstacles--;
			list_del_rcu(&pt->list);
	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
	spin_unlock_bh(&ptype_lock);
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	This call sleeps to guarantee that no CPU is looking at the packet
void dev_remove_pack(struct packet_type *pt)
	__dev_remove_pack(pt);
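/*
 * Example (editor's illustration, not part of the original source): a
 * minimal use of the registration interfaces above.  The handler name
 * and the choice of ETH_P_ARP are arbitrary; only dev_add_pack() and
 * dev_remove_pack() are the real APIs.  A handler receives its own
 * reference to the skb and must free or consume it.
 *
 *	static int my_arp_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt)
 *	{
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_arp_pt = {
 *		.type	= __constant_htons(ETH_P_ARP),
 *		.func	= my_arp_rcv,
 *	};
 *
 *	dev_add_pack(&my_arp_pt);	(module init)
 *	...
 *	dev_remove_pack(&my_arp_pt);	(module exit)
 */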
/******************************************************************************
		      Device Boot-time Settings Routines
*******************************************************************************/
/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *	Adds a new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
int netdev_boot_setup_add(char *name, struct ifmap *map)
	struct netdev_boot_setup *s;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strcpy(s[i].name, name);
			memcpy(&s[i].map, map, sizeof(s[i].map));
	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if they are.
int netdev_boot_setup_check(struct net_device *dev)
	struct netdev_boot_setup *s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *	Check boot time settings for the base address of the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found.
unsigned long netdev_boot_base(const char *prefix, int unit)
	const struct netdev_boot_setup *s = dev_boot_setup;
	sprintf(name, "%s%d", prefix, unit);
	 * If the device is already registered, return a base of 1
	 * to indicate not to probe for this interface
	if (__dev_get_by_name(name))
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
 * Saves the settings configured at boot time for any netdevice.
int __init netdev_boot_setup(char *str)
	str = get_options(str, ARRAY_SIZE(ints), ints);
	memset(&map, 0, sizeof(map));
		map.base_addr = ints[2];
		map.mem_start = ints[3];
		map.mem_end = ints[4];
	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
__setup("netdev=", netdev_boot_setup);
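/*
 * Example (editor's note, not in the original source; the exact ints[]
 * indexing relies on parsing code that is not shown here): with the
 * __setup() hook above, a kernel command line entry such as
 *
 *	netdev=5,0x300,eth0
 *
 * is parsed by netdev_boot_setup() with get_options(), and the result
 * is stored roughly as if the following had been called:
 *
 *	struct ifmap map = {
 *		.irq		= 5,
 *		.base_addr	= 0x300,
 *	};
 *	netdev_boot_setup_add("eth0", &map);
 *
 * netdev_boot_setup_check() then copies these values into the matching
 * net_device before the driver probes it.
 */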
/*******************************************************************************
			Device Interface Subroutines
*******************************************************************************/
 *	__dev_get_by_name	- find a device by its name
 *	@name: name to find
 *	Find an interface by name. Must be called under the RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
struct net_device *__dev_get_by_name(const char *name)
	struct hlist_node *p;
	hlist_for_each(p, dev_name_hash(name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
 *	dev_get_by_name		- find a device by its name
 *	@name: name to find
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
struct net_device *dev_get_by_name(const char *name)
	struct net_device *dev;
	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	read_unlock(&dev_base_lock);
   The return value has been changed to int to prevent illegal usage in the
   future. It is still legal to use it to check for device existence.
   The user should understand that the result returned by this function
   is meaningless unless it was issued under the rtnl semaphore.
 *	dev_get	- test if a device exists
 *	@name:	name to test for
 *	Test if a name exists. Returns true if the name is found. In order
 *	to be sure the name is not allocated or removed during the test the
 *	caller must hold the rtnl semaphore.
 *	This function exists only for backward compatibility with older
int __dev_get(const char *name)
	struct net_device *dev;
	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	read_unlock(&dev_base_lock);
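/*
 * Example (editor's illustration, not part of the original source): the
 * usual reference-counted lookup from a context that may sleep; "eth0"
 * is an arbitrary name.
 *
 *	struct net_device *dev = dev_get_by_name("eth0");
 *	if (dev) {
 *		...use dev; the held reference keeps it from going away...
 *		dev_put(dev);
 *	}
 *
 * The __dev_get_by_name() variant skips the refcounting and is only
 * safe under the rtnl semaphore or dev_base_lock, as documented above.
 */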
 *	__dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
struct net_device *__dev_get_by_index(int ifindex)
	struct hlist_node *p;
	hlist_for_each(p, dev_index_hash(ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
 *	dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
struct net_device *dev_get_by_index(int ifindex)
	struct net_device *dev;
	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifindex);
	read_unlock(&dev_base_lock);
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@type: media type of device
 *	@ha: hardware address
 *	Search for an interface by MAC address. Returns %NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *	If the API was consistent this would be __dev_get_by_hwaddr
struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
	struct net_device *dev;
	for (dev = dev_base; dev; dev = dev->next)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
struct net_device *__dev_getfirstbyhwtype(unsigned short type)
	struct net_device *dev;
	for (dev = dev_base; dev; dev = dev->next)
		if (dev->type == type)
EXPORT_SYMBOL(__dev_getfirstbyhwtype);
struct net_device *dev_getfirstbyhwtype(unsigned short type)
	struct net_device *dev;
	dev = __dev_getfirstbyhwtype(type);
EXPORT_SYMBOL(dev_getfirstbyhwtype);
 *	dev_get_by_flags - find any device with given flags
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *	Search for any interface with the given flags. Returns %NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
struct net_device *dev_get_by_flags(unsigned short if_flags, unsigned short mask)
	struct net_device *dev;
	read_lock(&dev_base_lock);
	dev = __dev_get_by_flags(if_flags, mask);
	read_unlock(&dev_base_lock);
 *	__dev_get_by_flags - find any device with given flags
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *	Search for any interface with the given flags. Returns %NULL if a device
 *	is not found or a pointer to the device. The caller must hold either
 *	the RTNL semaphore or @dev_base_lock.
struct net_device *__dev_get_by_flags(unsigned short if_flags, unsigned short mask)
	struct net_device *dev;
	for (dev = dev_base; dev != NULL; dev = dev->next) {
		if (((dev->flags ^ if_flags) & mask) == 0)
 *	dev_valid_name - check if name is okay for network device
 *	Network device names need to be valid file names to
 *	allow sysfs to work.
int dev_valid_name(const char *name)
	return !(*name == '\0'
		 || !strcmp(name, ".")
		 || !strcmp(name, "..")
		 || strchr(name, '/'));
 *	dev_alloc_name - allocate a name for a device
 *	@name: name format string
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. Not efficient for many devices, not called a lot. The caller
 *	must hold the dev_base or rtnl lock while allocating the name and
 *	adding the device in order to avoid duplicates. Returns the number
 *	of the unit assigned or a negative errno code.
int dev_alloc_name(struct net_device *dev, const char *name)
	const int max_netdevices = 8*PAGE_SIZE;
	struct net_device *d;
	p = strnchr(name, IFNAMSIZ-1, '%');
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		if (p[1] != 'd' || strchr(p + 2, '%'))
		/* Use one page as a bit array of possible slots */
		inuse = (long *) get_zeroed_page(GFP_ATOMIC);
		for (d = dev_base; d; d = d->next) {
			if (!sscanf(d->name, name, &i))
			if (i < 0 || i >= max_netdevices)
			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, sizeof(buf), name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	snprintf(buf, sizeof(buf), name, i);
	if (!__dev_get_by_name(buf)) {
		strlcpy(dev->name, buf, IFNAMSIZ);
	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
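/*
 * Example (editor's illustration, not part of the original source): a
 * driver that wants the usual ethN naming passes a format string and
 * lets dev_alloc_name() pick the first free unit, holding the rtnl
 * semaphore across the allocation and the subsequent registration:
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto fail;	(err is a negative errno)
 *	...dev->name is now e.g. "eth2"; err is the unit number chosen...
 */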
 *	dev_change_name - change name of a device
 *	@newname: name (or format string), must be at least IFNAMSIZ
 *	Change name of a device, can pass format strings "eth%d".
int dev_change_name(struct net_device *dev, char *newname)
	if (dev->flags & IFF_UP)
	if (!dev_valid_name(newname))
	if (strchr(newname, '%')) {
		int err = dev_alloc_name(dev, newname);
		strcpy(newname, dev->name);
	else if (__dev_get_by_name(newname))
	strlcpy(dev->name, newname, IFNAMSIZ);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
	class_device_rename(&dev->class_dev, dev->name);
	notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
void netdev_state_change(struct net_device *dev)
	if (dev->flags & IFF_UP) {
		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
 *	dev_load	- load a network module
 *	@name: name of interface
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
void dev_load(const char *name)
	struct net_device *dev;
	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	read_unlock(&dev_base_lock);
	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
static int default_rebuild_header(struct sk_buff *skb)
	printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
	       skb->dev ? skb->dev->name : "NULL!!!");
 * Some old buggy device drivers change get_stats after registering
 * the device. Try to trap them here.
 * This can be eliminated when all devices are known to be fixed.
static inline int get_stats_changed(struct net_device *dev)
	int changed = dev->last_stats != dev->get_stats;
	dev->last_stats = dev->get_stats;
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
int dev_open(struct net_device *dev)
	if (dev->flags & IFF_UP)
	 *	Check for broken device drivers.
	if (get_stats_changed(dev) && net_ratelimit()) {
		printk(KERN_ERR "%s: driver changed get_stats after register\n",
	 *	Is it even present?
	if (!netif_device_present(dev))
	 *	Call the device's private open method
	set_bit(__LINK_STATE_START, &dev->state);
		ret = dev->open(dev);
			clear_bit(__LINK_STATE_START, &dev->state);
	 *	Check for more broken device drivers.
	if (get_stats_changed(dev) && net_ratelimit()) {
		printk(KERN_ERR "%s: driver changed get_stats in open\n",
	 *	If it went open OK then:
		dev->flags |= IFF_UP;
		 *	Initialize multicasting status
		 *	Wake up the transmit queue engine
		 *	... and announce the new interface.
		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
#ifdef CONFIG_NET_FASTROUTE
static void dev_do_clear_fastroute(struct net_device *dev)
	if (dev->accept_fastpath) {
		for (i = 0; i <= NETDEV_FASTROUTE_HMASK; i++) {
			struct dst_entry *dst;
			write_lock_irq(&dev->fastpath_lock);
			dst = dev->fastpath[i];
			dev->fastpath[i] = NULL;
			write_unlock_irq(&dev->fastpath_lock);
void dev_clear_fastroute(struct net_device *dev)
		dev_do_clear_fastroute(dev);
		read_lock(&dev_base_lock);
		for (dev = dev_base; dev; dev = dev->next)
			dev_do_clear_fastroute(dev);
		read_unlock(&dev_base_lock);
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
int dev_close(struct net_device *dev)
	if (!(dev->flags & IFF_UP))
	 *	Tell people we are going down, so that they can
	 *	prepare for it while the device is still operating.
	notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
	dev_deactivate(dev);
	clear_bit(__LINK_STATE_START, &dev->state);
	/* Synchronize to the scheduled poll. We cannot touch the poll list;
	 * it can even be on a different cpu. So just clear netif_running()
	 * and wait until the poll really happens. Actually, the best place
	 * for this is inside dev->stop() after the device has stopped its
	 * irq engine, but this requires more changes in devices. */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */
	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(1);
	 *	Call the device specific close. This cannot fail.
	 *	Only if the device is UP.
	 *	We allow it to be called even after a DETACH hot-plug
	 *	The device is now down.
	dev->flags &= ~IFF_UP;
#ifdef CONFIG_NET_FASTROUTE
	dev_clear_fastroute(dev);
	 *	Tell people we are down
	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 *	register_netdevice_notifier - register a network notifier block
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow it to have a race-free
 *	view of the network device list.
int register_netdevice_notifier(struct notifier_block *nb)
	struct net_device *dev;
	err = notifier_chain_register(&netdev_chain, nb);
		for (dev = dev_base; dev; dev = dev->next) {
			nb->notifier_call(nb, NETDEV_REGISTER, dev);
			if (dev->flags & IFF_UP)
				nb->notifier_call(nb, NETDEV_UP, dev);
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
int unregister_netdevice_notifier(struct notifier_block *nb)
	return notifier_chain_unregister(&netdev_chain, nb);
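/*
 * Example (editor's illustration, not part of the original source): a
 * minimal notifier block watching devices coming up or going down.
 * The callback and variable names are arbitrary.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *		case NETDEV_DOWN:
 *			printk(KERN_INFO "%s changed state\n", dev->name);
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_notifier);
 *
 * As documented above, REGISTER and UP events for already-present
 * devices are replayed when the notifier is registered.
 */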
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@v:   pointer passed unmodified to notifier function
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for notifier_call_chain().
int call_netdevice_notifiers(unsigned long val, void *v)
	return notifier_call_chain(&netdev_chain, val, v);
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
	struct packet_type *ptype;
	net_timestamp(&skb->stamp);
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			/* skb->nh should be correctly
			   set by the sender, so that the second statement is
			   just protection against buggy protocols.
			skb2->mac.raw = skb2->data;
			if (skb2->nh.raw < skb2->data ||
			    skb2->nh.raw > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       skb2->protocol, dev->name);
				skb2->nh.raw = skb2->data;
			skb2->h.raw = skb2->nh.raw;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype);
/* Calculate the checksum in the case when the packet is misrouted.
 * If that fails for some reason, ignore it and send the skb with a wrong
struct sk_buff *skb_checksum_help(struct sk_buff *skb)
	int offset = skb->h.raw - skb->data;
	if (offset > (int)skb->len)
	csum = skb_checksum(skb, offset, skb->len-offset, 0);
	offset = skb->tail - skb->h.raw;
	if (skb->csum + 2 > offset)
	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;
#ifdef CONFIG_HIGHMEM
/* Actually, we should eliminate this check as soon as we know that:
 * 1. An IOMMU is present and can map all of memory.
 * 2. No high memory really exists on this machine.
static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
	if (dev->features & NETIF_F_HIGHDMA)
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (skb_shinfo(skb)->frags[i].page >= highmem_start_page)
#define illegal_highdma(dev, skb)	(0)
extern void skb_release_data(struct sk_buff *);
/* Keep head the same: replace data */
int __skb_linearize(struct sk_buff *skb, int gfp_mask)
	struct skb_shared_info *ninfo;
	int headerlen = skb->data - skb->head;
	int expand = (skb->tail + skb->data_len) - skb->end;
	if (skb_shared(skb))
	size = skb->end - skb->head + expand;
	size = SKB_DATA_ALIGN(size);
	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	/* Copy entire thing */
	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
	ninfo = (struct skb_shared_info*)(data + size);
	atomic_set(&ninfo->dataref, 1);
	ninfo->tso_size = skb_shinfo(skb)->tso_size;
	ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
	ninfo->nr_frags = 0;
	ninfo->frag_list = NULL;
	/* Offset between the two in bytes */
	offset = data - skb->head;
	/* Free old data. */
	skb_release_data(skb);
	skb->end = data + size;
	/* Set up new pointers */
	skb->h.raw += offset;
	skb->nh.raw += offset;
	skb->mac.raw += offset;
	skb->tail += offset;
	skb->data += offset;
	/* We are no longer a clone, even if we were. */
	skb->tail += skb->data_len;
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
int dev_queue_xmit(struct sk_buff *skb)
	struct net_device *dev = skb->dev;
	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb, GFP_ATOMIC))
	/* A fragmented skb is linearized if the device does not support SG,
	 * or if at least one of the fragments is in highmem and the device
	 * does not support DMA from it.
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb, GFP_ATOMIC))
	/* If the packet is not checksummed and the device does not support
	 * checksumming for this protocol, complete the checksumming here.
	if (skb->ip_summed == CHECKSUM_HW &&
	    (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
	     (!(dev->features & NETIF_F_IP_CSUM) ||
	      skb->protocol != htons(ETH_P_IP)))) {
		if ((skb = skb_checksum_help(skb)) == NULL)
	/* Grab the device queue */
	spin_lock_bh(&dev->queue_lock);
		rc = q->enqueue(skb, q);
		spin_unlock_bh(&dev->queue_lock);
		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
	/* The device has no queue. Common case for software devices:
	   loopback, all sorts of tunnels...
	   Really, it is unlikely that xmit_lock protection is necessary here.
	   (f.e. loopback and IP tunnels are clean ignoring statistics
	   However, it is possible that they rely on protection
	   Check this and take the lock. It is not prone to deadlocks.
	   Or take the noqueue qdisc; it is even simpler 8)
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id();
		if (dev->xmit_lock_owner != cpu) {
			 * The spin_lock effectively does a preempt lock, but
			 * we are about to drop that...
			spin_unlock(&dev->queue_lock);
			spin_lock(&dev->xmit_lock);
			dev->xmit_lock_owner = cpu;
			if (!netif_queue_stopped(dev)) {
					dev_queue_xmit_nit(skb, dev);
				if (!dev->hard_start_xmit(skb, dev)) {
					dev->xmit_lock_owner = -1;
					spin_unlock_bh(&dev->xmit_lock);
			dev->xmit_lock_owner = -1;
			spin_unlock_bh(&dev->xmit_lock);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		/* Recursion is detected! It is possible,
		if (net_ratelimit())
			printk(KERN_CRIT "Dead loop on virtual device "
			       "%s, fix it urgently!\n", dev->name);
	spin_unlock_bh(&dev->queue_lock);
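/*
 * Example (editor's illustration, not part of the original source): how
 * a protocol might hand a fully built frame to dev_queue_xmit().  The
 * payload handling is elided and the variable names are arbitrary; only
 * the calls shown are the real interfaces.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	...copy len bytes of payload into skb_put(skb, len)...
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);	(or whatever is being sent)
 *	err = dev_queue_xmit(skb);		(consumes the skb)
 *
 * As the comment above notes, a successful return does not mean the
 * frame hit the wire, only that it was accepted for queueing.
 */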
/*=======================================================================
=======================================================================*/
int netdev_max_backlog = 300;
int weight_p = 64;		/* old backlog weight */
/* These numbers are selected based on intuition and some
 * experimentation; if you have a more scientific way of doing this,
 * please go ahead and fix things.
int no_cong_thresh = 10;
DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
#ifdef CONFIG_NET_HW_FLOWCONTROL
atomic_t netdev_dropping = ATOMIC_INIT(0);
static unsigned long netdev_fc_mask = 1;
unsigned long netdev_fc_xoff;
spinlock_t netdev_fc_lock = SPIN_LOCK_UNLOCKED;
	void (*stimul)(struct net_device *);
	struct net_device *dev;
} netdev_fc_slots[BITS_PER_LONG];
int netdev_register_fc(struct net_device *dev,
		       void (*stimul)(struct net_device *dev))
	unsigned long flags;
	spin_lock_irqsave(&netdev_fc_lock, flags);
	if (netdev_fc_mask != ~0UL) {
		bit = ffz(netdev_fc_mask);
		netdev_fc_slots[bit].stimul = stimul;
		netdev_fc_slots[bit].dev = dev;
		set_bit(bit, &netdev_fc_mask);
		clear_bit(bit, &netdev_fc_xoff);
	spin_unlock_irqrestore(&netdev_fc_lock, flags);
void netdev_unregister_fc(int bit)
	unsigned long flags;
	spin_lock_irqsave(&netdev_fc_lock, flags);
		netdev_fc_slots[bit].stimul = NULL;
		netdev_fc_slots[bit].dev = NULL;
		clear_bit(bit, &netdev_fc_mask);
		clear_bit(bit, &netdev_fc_xoff);
	spin_unlock_irqrestore(&netdev_fc_lock, flags);
static void netdev_wakeup(void)
	spin_lock(&netdev_fc_lock);
	xoff = netdev_fc_xoff;
		netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
	spin_unlock(&netdev_fc_lock);
static void get_sample_stats(int cpu)
	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
	int blog = sd->input_pkt_queue.qlen;
	int avg_blog = sd->avg_blog;
	avg_blog = (avg_blog >> 1) + (blog >> 1);
	if (avg_blog > mod_cong) {
		/* Above moderate congestion levels. */
		sd->cng_level = NET_RX_CN_HIGH;
		rq = rd % netdev_max_backlog;
		if (rq < avg_blog) /* unlucky bastard */
			sd->cng_level = NET_RX_DROP;
	} else if (avg_blog > lo_cong) {
		sd->cng_level = NET_RX_CN_MOD;
		rq = rd % netdev_max_backlog;
		if (rq < avg_blog) /* unlucky bastard */
			sd->cng_level = NET_RX_CN_HIGH;
	} else if (avg_blog > no_cong)
		sd->cng_level = NET_RX_CN_LOW;
	else /* no congestion */
		sd->cng_level = NET_RX_SUCCESS;
	sd->avg_blog = avg_blog;
#ifdef OFFLINE_SAMPLE
static void sample_queue(unsigned long dummy)
	/* 10 ms or 1 ms -- I don't care -- JHS */
	int cpu = smp_processor_id();
	get_sample_stats(cpu);
	next_tick += jiffies;
	mod_timer(&samp_timer, next_tick);
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_CN_LOW	(low congestion)
 *	NET_RX_CN_MOD	(moderate congestion)
 *	NET_RX_CN_HIGH	(high congestion)
 *	NET_RX_DROP	(packet was dropped)
int netif_rx(struct sk_buff *skb)
	struct softnet_data *queue;
	unsigned long flags;
#ifdef CONFIG_NETPOLL_RX
	if (skb->dev->netpoll_rx && netpoll_rx(skb)) {
	if (!skb->stamp.tv_sec)
		net_timestamp(&skb->stamp);
	 * The code is rearranged so that the path is shortest when
	 * the CPU is congested but still operating.
	local_irq_save(flags);
	this_cpu = smp_processor_id();
	queue = &__get_cpu_var(softnet_data);
	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
			if (queue->throttle)
			__skb_queue_tail(&queue->input_pkt_queue, skb);
#ifndef OFFLINE_SAMPLE
			get_sample_stats(this_cpu);
			local_irq_restore(flags);
			return queue->cng_level;
		if (queue->throttle) {
			queue->throttle = 0;
#ifdef CONFIG_NET_HW_FLOWCONTROL
			if (atomic_dec_and_test(&netdev_dropping))
		netif_rx_schedule(&queue->backlog_dev);
	if (!queue->throttle) {
		queue->throttle = 1;
		__get_cpu_var(netdev_rx_stat).throttled++;
#ifdef CONFIG_NET_HW_FLOWCONTROL
		atomic_inc(&netdev_dropping);
	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);
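/*
 * Example (editor's illustration, not part of the original source): the
 * classic receive path of a non-NAPI driver's interrupt handler feeding
 * netif_rx() above.  "priv" stands for a hypothetical driver-private
 * structure; the rest are real interfaces.
 *
 *	skb = dev_alloc_skb(pkt_len + 2);
 *	if (!skb) {
 *		priv->stats.rx_dropped++;
 *		return;
 *	}
 *	skb_reserve(skb, 2);		(align the IP header)
 *	...copy pkt_len bytes into skb_put(skb, pkt_len)...
 *	skb->dev = dev;
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *	dev->last_rx = jiffies;
 *
 * The NET_RX_* return codes listed above can be used by the driver as
 * a congestion hint.
 */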
static __inline__ void skb_bond(struct sk_buff *skb)
	struct net_device *dev = skb->dev;
		skb->real_dev = skb->dev;
		skb->dev = dev->master;
static void net_tx_action(struct softirq_action *h)
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	if (sd->completion_queue) {
		struct sk_buff *clist;
		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
			struct sk_buff *skb = clist;
			clist = clist->next;
			BUG_TRAP(!atomic_read(&skb->users));
	if (sd->output_queue) {
		struct net_device *head;
		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
			struct net_device *dev = head;
			head = head->next_sched;
			smp_mb__before_clear_bit();
			clear_bit(__LINK_STATE_SCHED, &dev->state);
			if (spin_trylock(&dev->queue_lock)) {
				spin_unlock(&dev->queue_lock);
				netif_schedule(dev);
static __inline__ int deliver_skb(struct sk_buff *skb,
				  struct packet_type *pt_prev, int last)
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev);
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
int (*br_handle_frame_hook)(struct sk_buff *skb);
static __inline__ int handle_bridge(struct sk_buff *skb,
				    struct packet_type *pt_prev)
	int ret = NET_RX_DROP;
		ret = deliver_skb(skb, pt_prev, 0);
static inline int __handle_bridge(struct sk_buff *skb,
				  struct packet_type **pt_prev, int *ret)
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
	if (skb->dev->br_port && skb->pkt_type != PACKET_LOOPBACK) {
		*ret = handle_bridge(skb, *pt_prev);
		if (br_handle_frame_hook(skb) == 0)
int netif_receive_skb(struct sk_buff *skb)
	struct packet_type *ptype, *pt_prev;
	int ret = NET_RX_DROP;
	unsigned short type;
#ifdef CONFIG_NETPOLL_RX
	if (skb->dev->netpoll_rx && skb->dev->poll && netpoll_rx(skb)) {
	if (!skb->stamp.tv_sec)
		net_timestamp(&skb->stamp);
	__get_cpu_var(netdev_rx_stat).total++;
#ifdef CONFIG_NET_FASTROUTE
	if (skb->pkt_type == PACKET_FASTROUTE) {
		__get_cpu_var(netdev_rx_stat).fastroute_deferred_out++;
		return dev_queue_xmit(skb);
	skb->h.raw = skb->nh.raw = skb->data;
	skb->mac_len = skb->nh.raw - skb->mac.raw;
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
				ret = deliver_skb(skb, pt_prev, 0);
		handle_diverter(skb);
	if (__handle_bridge(skb, &pt_prev, &ret))
	type = skb->protocol;
	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
		if (ptype->type == type &&
		    (!ptype->dev || ptype->dev == skb->dev)) {
				ret = deliver_skb(skb, pt_prev, 0);
		ret = pt_prev->func(skb, skb->dev, pt_prev);
	/* Jamal, now you will not be able to escape explaining
	 * to me how you were going to use this. :-)
static int process_backlog(struct net_device *backlog_dev, int *budget)
	int quota = min(backlog_dev->quota, *budget);
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;
		struct sk_buff *skb;
		struct net_device *dev;
		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		netif_receive_skb(skb);
		if (work >= quota || jiffies - start_time > 1)
#ifdef CONFIG_NET_HW_FLOWCONTROL
	if (queue->throttle &&
	    queue->input_pkt_queue.qlen < no_cong_thresh) {
		queue->throttle = 0;
		if (atomic_dec_and_test(&netdev_dropping)) {
	backlog_dev->quota -= work;
	backlog_dev->quota -= work;
	list_del(&backlog_dev->poll_list);
	smp_mb__before_clear_bit();
	netif_poll_enable(backlog_dev);
	if (queue->throttle) {
		queue->throttle = 0;
#ifdef CONFIG_NET_HW_FLOWCONTROL
		if (atomic_dec_and_test(&netdev_dropping))
static void net_rx_action(struct softirq_action *h)
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;
	int budget = netdev_max_backlog;
	local_irq_disable();
	while (!list_empty(&queue->poll_list)) {
		struct net_device *dev;
		if (budget <= 0 || jiffies - start_time > 1)
		dev = list_entry(queue->poll_list.next,
				 struct net_device, poll_list);
		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
			local_irq_disable();
			list_del(&dev->poll_list);
			list_add_tail(&dev->poll_list, &queue->poll_list);
				dev->quota += dev->weight;
				dev->quota = dev->weight;
			local_irq_disable();
	__get_cpu_var(netdev_rx_stat).time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
static gifconf_func_t *gifconf_list[NPROTO];
 *	register_gifconf	-	register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
	if (family >= NPROTO)
	gifconf_list[family] = gifconf;
 *	Map an interface index to its name (SIOCGIFNAME)
 *	We need this ioctl for efficient implementation of the
 *	if_indextoname() function required by the IPv6 API.  Without
 *	it, we would have to search all the interfaces to find a
static int dev_ifname(struct ifreq __user *arg)
	struct net_device *dev;
	 *	Fetch the caller's info block.
	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifr.ifr_ifindex);
		read_unlock(&dev_base_lock);
	strcpy(ifr.ifr_name, dev->name);
	read_unlock(&dev_base_lock);
	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size eventually, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
static int dev_ifconf(char __user *arg)
	struct net_device *dev;
	 *	Fetch the caller's info block.
	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
	 *	Loop over the interfaces, and write an info block for each.
	for (dev = dev_base; dev; dev = dev->next) {
		if (!dev_in_nx_info(dev, current->nx_info))
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
					done = gifconf_list[i](dev, NULL, 0);
					done = gifconf_list[i](dev, pos + total,
	 *	All done.  Write the updated control block back to the caller.
	ifc.ifc_len = total;
	 *	Both BSD and Solaris return 0 here, so we do too.
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
#ifdef CONFIG_PROC_FS
 *	This is invoked by the /proc filesystem handler to display a device
static __inline__ struct net_device *dev_get_idx(loff_t pos)
	struct net_device *dev;
	for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next);
	return i == pos ? dev : NULL;
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	read_lock(&dev_base_lock);
	return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN;
void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
void dev_seq_stop(struct seq_file *seq, void *v)
	read_unlock(&dev_base_lock);
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
	struct nx_info *nxi = current->nx_info;
	if (!dev_in_nx_info(dev, nxi))
	if (dev->get_stats) {
		struct net_device_stats *stats = dev->get_stats(dev);
		seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
			   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
			   dev->name, stats->rx_bytes, stats->rx_packets,
			   stats->rx_dropped + stats->rx_missed_errors,
			   stats->rx_fifo_errors,
			   stats->rx_length_errors + stats->rx_over_errors +
			   stats->rx_crc_errors + stats->rx_frame_errors,
			   stats->rx_compressed, stats->multicast,
			   stats->tx_bytes, stats->tx_packets,
			   stats->tx_errors, stats->tx_dropped,
			   stats->tx_fifo_errors, stats->collisions,
			   stats->tx_carrier_errors +
			   stats->tx_aborted_errors +
			   stats->tx_window_errors +
			   stats->tx_heartbeat_errors,
			   stats->tx_compressed);
		seq_printf(seq, "%6s: No statistics available.\n", dev->name);
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev
static int dev_seq_show(struct seq_file *seq, void *v)
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-| Receive "
			      " face |bytes packets errs drop fifo frame "
			      "compressed multicast|bytes packets errs "
			      "drop fifo colls carrier compressed\n");
		dev_seq_printf_stats(seq, v);
static struct netif_rx_stats *softnet_get_online(loff_t *pos)
	struct netif_rx_stats *rc = NULL;
	while (*pos < NR_CPUS)
		if (cpu_online(*pos)) {
			rc = &per_cpu(netdev_rx_stat, *pos);
static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
	return softnet_get_online(pos);
static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	return softnet_get_online(pos);
static void softnet_seq_stop(struct seq_file *seq, void *v)
static int softnet_seq_show(struct seq_file *seq, void *v)
	struct netif_rx_stats *s = v;
	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   s->total, s->dropped, s->time_squeeze, s->throttled,
		   s->fastroute_hit, s->fastroute_success, s->fastroute_defer,
		   s->fastroute_deferred_out,
		   s->fastroute_latency_reduction
static struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
static int dev_seq_open(struct inode *inode, struct file *file)
	return seq_open(file, &dev_seq_ops);
static struct file_operations dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dev_seq_open,
	.llseek	 = seq_lseek,
	.release = seq_release,
static struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
static int softnet_seq_open(struct inode *inode, struct file *file)
	return seq_open(file, &softnet_seq_ops);
static struct file_operations softnet_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = softnet_seq_open,
	.llseek	 = seq_lseek,
	.release = seq_release,
extern int wireless_proc_init(void);
#define wireless_proc_init() 0
static int __init dev_proc_init(void)
	if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops))
	if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
	if (wireless_proc_init())
	proc_net_remove("softnet_stat");
	proc_net_remove("dev");
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */
 *	netdev_set_master	-	set up master/slave pair
 *	@slave: slave device
 *	@master: new master device
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success the reference counts
 *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
 *	function returns zero.
int netdev_set_master(struct net_device *slave, struct net_device *master)
	struct net_device *old = slave->master;
	slave->master = master;
		slave->flags |= IFF_SLAVE;
		slave->flags &= ~IFF_SLAVE;
	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
void dev_set_promiscuity(struct net_device *dev, int inc)
	unsigned short old_flags = dev->flags;
	dev->flags |= IFF_PROMISC;
	if ((dev->promiscuity += inc) == 0)
		dev->flags &= ~IFF_PROMISC;
	if (dev->flags ^ old_flags) {
#ifdef CONFIG_NET_FASTROUTE
		if (dev->flags & IFF_PROMISC) {
			netdev_fastroute_obstacles++;
			dev_clear_fastroute(dev);
			netdev_fastroute_obstacles--;
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
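/*
 * Example (editor's illustration, not part of the original source): a
 * packet-capture module would typically bump promiscuity while a socket
 * is bound to the device and drop it again on close; callers generally
 * do this under the rtnl semaphore (an assumption here, matching how
 * most in-tree users behave).
 *
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, 1);	(enter promiscuous mode)
 *	...
 *	dev_set_promiscuity(dev, -1);	(leave when the last user is gone)
 *	rtnl_unlock();
 *
 * Because the count is additive, nested users do not fight over the
 * IFF_PROMISC flag.
 */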
 *	dev_set_allmulti	- update allmulti count on a device
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface keeps listening
 *	to all multicast frames. Once it hits zero the device reverts back to
 *	normal filtering operation. A negative @inc value is used to drop the
 *	counter when releasing a resource needing all multicasts.
void dev_set_allmulti(struct net_device *dev, int inc)
	unsigned short old_flags = dev->flags;
	dev->flags |= IFF_ALLMULTI;
	if ((dev->allmulti += inc) == 0)
		dev->flags &= ~IFF_ALLMULTI;
	if (dev->flags ^ old_flags)
unsigned dev_get_flags(const struct net_device *dev)
	flags = (dev->flags & ~(IFF_PROMISC |
		(dev->gflags & (IFF_PROMISC |
	if (netif_running(dev) && netif_carrier_ok(dev))
		flags |= IFF_RUNNING;
int dev_change_flags(struct net_device *dev, unsigned flags)
	int old_flags = dev->flags;
	 *	Set the flags on our device.
	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
		      (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
	 *	Load in the correct multicast list now the flags have changed.
	 *	Have we downed the interface? We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different? */
		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
	if (dev->flags & IFF_UP &&
	    ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? +1 : -1;
		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	   is important. Some (broken) drivers set IFF_PROMISC when
	   IFF_ALLMULTI is requested, without asking us and without reporting.
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	if (old_flags ^ dev->flags)
		rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);
int dev_set_mtu(struct net_device *dev, int new_mtu)
	if (new_mtu == dev->mtu)
	/* MTU must be positive. */
	if (!netif_device_present(dev))
	if (dev->change_mtu)
		err = dev->change_mtu(dev, new_mtu);
	if (!err && dev->flags & IFF_UP)
		notifier_call_chain(&netdev_chain,
				    NETDEV_CHANGEMTU, dev);
 *	Perform the SIOCxIFxxx calls.
static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
	struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
		case SIOCGIFFLAGS:	/* Get interface flags */
			ifr->ifr_flags = dev_get_flags(dev);
		case SIOCSIFFLAGS:	/* Set interface flags */
			return dev_change_flags(dev, ifr->ifr_flags);
		case SIOCGIFMETRIC:	/* Get the metric on the interface
					   (currently unused) */
			ifr->ifr_metric = 0;
		case SIOCSIFMETRIC:	/* Set the metric on the interface
					   (currently unused) */
		case SIOCGIFMTU:	/* Get the MTU of a device */
			ifr->ifr_mtu = dev->mtu;
		case SIOCSIFMTU:	/* Set the MTU of a device */
			return dev_set_mtu(dev, ifr->ifr_mtu);
			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
			ifr->ifr_hwaddr.sa_family = dev->type;
			if (!dev->set_mac_address)
			if (ifr->ifr_hwaddr.sa_family != dev->type)
			if (!netif_device_present(dev))
			err = dev->set_mac_address(dev, &ifr->ifr_hwaddr);
				notifier_call_chain(&netdev_chain,
						    NETDEV_CHANGEADDR, dev);
		case SIOCSIFHWBROADCAST:
			if (ifr->ifr_hwaddr.sa_family != dev->type)
			memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
			notifier_call_chain(&netdev_chain,
					    NETDEV_CHANGEADDR, dev);
			ifr->ifr_map.mem_start = dev->mem_start;
			ifr->ifr_map.mem_end   = dev->mem_end;
			ifr->ifr_map.base_addr = dev->base_addr;
			ifr->ifr_map.irq       = dev->irq;
			ifr->ifr_map.dma       = dev->dma;
			ifr->ifr_map.port      = dev->if_port;
			if (dev->set_config) {
				if (!netif_device_present(dev))
				return dev->set_config(dev, &ifr->ifr_map);
			if (!dev->set_multicast_list ||
			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			if (!netif_device_present(dev))
			return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
			if (!dev->set_multicast_list ||
			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			if (!netif_device_present(dev))
			return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
			ifr->ifr_ifindex = dev->ifindex;
			ifr->ifr_qlen = dev->tx_queue_len;
			if (ifr->ifr_qlen < 0)
			dev->tx_queue_len = ifr->ifr_qlen;
			ifr->ifr_newname[IFNAMSIZ-1] = '\0';
			return dev_change_name(dev, ifr->ifr_newname);
		 *	Unknown or private ioctl
			if ((cmd >= SIOCDEVPRIVATE &&
			    cmd <= SIOCDEVPRIVATE + 15) ||
			    cmd == SIOCBONDENSLAVE ||
			    cmd == SIOCBONDRELEASE ||
			    cmd == SIOCBONDSETHWADDR ||
			    cmd == SIOCBONDSLAVEINFOQUERY ||
			    cmd == SIOCBONDINFOQUERY ||
			    cmd == SIOCBONDCHANGEACTIVE ||
			    cmd == SIOCGMIIPHY ||
			    cmd == SIOCGMIIREG ||
			    cmd == SIOCSMIIREG ||
			    cmd == SIOCWANDEV) {
				if (dev->do_ioctl) {
					if (netif_device_present(dev))
						err = dev->do_ioctl(dev, ifr,
 *	This function handles all "interface"-type I/O control requests. The actual
 *	'doing' part of this is dev_ifsioc above.
 *	dev_ioctl	-	network device ioctl
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes. The return value is the return from the syscall if
 *	positive or a negative errno code on error.
int dev_ioctl(unsigned int cmd, void __user *arg)
2562 /* One special case: SIOCGIFCONF takes an ifconf argument
2563 and requires a shared lock, because it sleeps writing to user space.
2567 if (cmd == SIOCGIFCONF) {
2569 ret = dev_ifconf((char __user *) arg);
2573 if (cmd == SIOCGIFNAME)
2574 return dev_ifname((struct ifreq __user *)arg);
2576 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2579 ifr.ifr_name[IFNAMSIZ-1] = 0;
2581 colon = strchr(ifr.ifr_name, ':');
2586 * See which interface the caller is talking about.
2591 * These ioctl calls:
2592 * - can be done by all.
2593 * - are atomic and do not require locking.
2604 dev_load(ifr.ifr_name);
2605 read_lock(&dev_base_lock);
2606 ret = dev_ifsioc(&ifr, cmd);
2607 read_unlock(&dev_base_lock);
2611 if (copy_to_user(arg, &ifr,
2612 sizeof(struct ifreq)))
2618 dev_load(ifr.ifr_name);
2620 ret = dev_ethtool(&ifr);
2625 if (copy_to_user(arg, &ifr,
2626 sizeof(struct ifreq)))
2632 * These ioctl calls:
2633 * - require superuser power.
2634 * - require strict serialization.
2640 if (!capable(CAP_NET_ADMIN))
2642 dev_load(ifr.ifr_name);
2644 ret = dev_ifsioc(&ifr, cmd);
2649 if (copy_to_user(arg, &ifr,
2650 sizeof(struct ifreq)))
2656 * These ioctl calls:
2657 * - require superuser power.
2658 * - require strict serialization.
2659 * - do not return a value
2669 case SIOCSIFHWBROADCAST:
2672 case SIOCBONDENSLAVE:
2673 case SIOCBONDRELEASE:
2674 case SIOCBONDSETHWADDR:
2675 case SIOCBONDSLAVEINFOQUERY:
2676 case SIOCBONDINFOQUERY:
2677 case SIOCBONDCHANGEACTIVE:
2678 if (!capable(CAP_NET_ADMIN))
2680 dev_load(ifr.ifr_name);
2682 ret = dev_ifsioc(&ifr, cmd);
2687 /* Get the per device memory space. We can add this but
2688 * currently do not support it */
2690 /* Set the per device memory buffer space.
2691 * Not applicable in our case */
2696 * Unknown or private ioctl.
2699 if (cmd == SIOCWANDEV ||
2700 (cmd >= SIOCDEVPRIVATE &&
2701 cmd <= SIOCDEVPRIVATE + 15)) {
2702 dev_load(ifr.ifr_name);
2704 ret = dev_ifsioc(&ifr, cmd);
2706 if (!ret && copy_to_user(arg, &ifr,
2707 sizeof(struct ifreq)))
2712 /* Take care of Wireless Extensions */
2713 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
2714 /* If command is `set a parameter', or
2715 * `get the encoding parameters', check if
2716 * the user has the right to do it */
2717 if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) {
2718 if (!capable(CAP_NET_ADMIN))
2721 dev_load(ifr.ifr_name);
2723 /* Follow me in net/core/wireless.c */
2724 ret = wireless_process_ioctl(&ifr, cmd);
2726 if (!ret && IW_IS_GET(cmd) &&
2727 copy_to_user(arg, &ifr,
2728 sizeof(struct ifreq)))
2732 #endif /* WIRELESS_EXT */
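/*
 * Illustrative sketch, not part of this file: how the SIOCGIFMTU case
 * handled above is typically reached from user space.  The interface
 * name "eth0" is an assumption for illustration only.
 */
#if 0	/* user-space example only, never compiled here */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	/* Any socket will do: the request is routed through the socket
	 * layer to dev_ioctl() above. */
	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
		printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);

	close(fd);
	return 0;
}
#endif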
2739 * dev_new_index - allocate an ifindex
2741 * Returns a suitable unique value for a new device interface
2742 * number. The caller must hold the rtnl semaphore or the
2743 * dev_base_lock to be sure it remains unique.
2745 int dev_new_index(void)
2751 if (!__dev_get_by_index(ifindex))
2756 static int dev_boot_phase = 1;
2758 /* Delayed registration/unregistration */
2759 static spinlock_t net_todo_list_lock = SPIN_LOCK_UNLOCKED;
2760 static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
2762 static inline void net_set_todo(struct net_device *dev)
2764 spin_lock(&net_todo_list_lock);
2765 list_add_tail(&dev->todo_list, &net_todo_list);
2766 spin_unlock(&net_todo_list_lock);
2770 * register_netdevice - register a network device
2771 * @dev: device to register
2773 * Take a completed network device structure and add it to the kernel
2774 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
2775 * chain. 0 is returned on success. A negative errno code is returned
2776 * on a failure to set up the device, or if the name is a duplicate.
2778 * Callers must hold the rtnl semaphore. See the comment at the
2779 * end of Space.c for details about the locking. You may want
2780 * register_netdev() instead of this.
2783 * The locking appears insufficient to guarantee two parallel registers
2784 * will not get the same name.
2787 int register_netdevice(struct net_device *dev)
2789 struct hlist_head *head;
2790 struct hlist_node *p;
2793 BUG_ON(dev_boot_phase);
2796 /* When net_devices are persistent, this will be fatal. */
2797 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
2799 spin_lock_init(&dev->queue_lock);
2800 spin_lock_init(&dev->xmit_lock);
2801 dev->xmit_lock_owner = -1;
2802 #ifdef CONFIG_NET_FASTROUTE
2803 dev->fastpath_lock = RW_LOCK_UNLOCKED;
2806 ret = alloc_divert_blk(dev);
2812 /* Init, if this function is available */
2814 ret = dev->init(dev);
2822 if (!dev_valid_name(dev->name)) {
2827 dev->ifindex = dev_new_index();
2828 if (dev->iflink == -1)
2829 dev->iflink = dev->ifindex;
2831 /* Check for existence of name */
2832 head = dev_name_hash(dev->name);
2833 hlist_for_each(p, head) {
2834 struct net_device *d
2835 = hlist_entry(p, struct net_device, name_hlist);
2836 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
2842 /* Fix illegal SG+CSUM combinations. */
2843 if ((dev->features & NETIF_F_SG) &&
2844 !(dev->features & (NETIF_F_IP_CSUM |
2846 NETIF_F_HW_CSUM))) {
2847 printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
2849 dev->features &= ~NETIF_F_SG;
2853 * Provide a nil rebuild_header routine that should never be
2854 * called; it exists only as a bug trap.
2857 if (!dev->rebuild_header)
2858 dev->rebuild_header = default_rebuild_header;
2861 * Default initial state at registration is that the
2862 * device is present.
2865 set_bit(__LINK_STATE_PRESENT, &dev->state);
2868 dev_init_scheduler(dev);
2869 write_lock_bh(&dev_base_lock);
2871 dev_tail = &dev->next;
2872 hlist_add_head(&dev->name_hlist, head);
2873 hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
2875 dev->reg_state = NETREG_REGISTERING;
2876 write_unlock_bh(&dev_base_lock);
2878 /* Notify protocols that a new device has appeared. */
2879 notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
2881 /* Finish registration after unlock */
2888 free_divert_blk(dev);
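/*
 * Illustrative sketch, not part of this file: the usual driver-side path
 * into register_netdevice() above, via the register_netdev() wrapper that
 * takes the rtnl semaphore.  The "example_" names are assumptions; a real
 * driver would also fill in open, stop, hard_start_xmit and friends.
 */
#if 0	/* example only, never compiled */
static struct net_device *example_dev;

static int __init example_init(void)
{
	int err;

	example_dev = alloc_etherdev(0);
	if (!example_dev)
		return -ENOMEM;

	example_dev->change_mtu = example_change_mtu;	/* sketch above */

	err = register_netdev(example_dev);	/* rtnl_lock + register_netdevice */
	if (err)
		free_netdev(example_dev);	/* never registered, plain free */
	return err;
}
#endif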
2893 * netdev_wait_allrefs - wait until all references are gone.
2895 * This is called when unregistering network devices.
2897 * Any protocol or device that holds a reference should register
2898 * for netdevice notification, and clean up and put back the
2899 * reference if they receive an UNREGISTER event.
2900 * We can get stuck here if buggy protocols don't correctly call dev_put().
2903 static void netdev_wait_allrefs(struct net_device *dev)
2905 unsigned long rebroadcast_time, warning_time;
2907 rebroadcast_time = warning_time = jiffies;
2908 while (atomic_read(&dev->refcnt) != 0) {
2909 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
2913 /* Rebroadcast unregister notification */
2914 notifier_call_chain(&netdev_chain,
2915 NETDEV_UNREGISTER, dev);
2917 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
2919 /* We must not have linkwatch events
2920 * pending on unregister. If this
2921 * happens, we simply run the queue
2922 * unscheduled, resulting in a noop for this device.
2925 linkwatch_run_queue();
2931 rebroadcast_time = jiffies;
2934 current->state = TASK_INTERRUPTIBLE;
2935 schedule_timeout(HZ / 4);
2937 if (time_after(jiffies, warning_time + 10 * HZ)) {
2938 printk(KERN_EMERG "unregister_netdevice: "
2939 "waiting for %s to become free. Usage "
2941 dev->name, atomic_read(&dev->refcnt));
2942 warning_time = jiffies;
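/*
 * Illustrative sketch, not part of this file: the kind of cooperation the
 * comment above netdev_wait_allrefs() asks for.  A subsystem that caches
 * a dev_hold()'d pointer registers a netdevice notifier and drops its
 * reference on NETDEV_UNREGISTER, so the refcount can reach zero.  The
 * "example_" names are assumptions for illustration only.
 */
#if 0	/* example only, never compiled */
static struct net_device *example_cached_dev;	/* holds one dev_hold() reference */

static int example_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER && dev == example_cached_dev) {
		example_cached_dev = NULL;
		dev_put(dev);		/* give the reference back */
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call	= example_netdev_event,
};

/* registered once at subsystem init time:
 *	register_netdevice_notifier(&example_netdev_notifier);
 */
#endif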
2951 * register_netdevice(x1);
2952 * register_netdevice(x2);
2954 * unregister_netdevice(y1);
2955 * unregister_netdevice(y2);
2961 * We are invoked by rtnl_unlock() after it drops the semaphore.
2962 * This allows us to deal with problems:
2963 * 1) We can create/delete sysfs objects which invoke hotplug
2964 * without deadlocking with linkwatch via keventd.
2965 * 2) Since we run with the RTNL semaphore not held, we can sleep
2966 * safely in order to wait for the netdev refcnt to drop to zero.
2968 static DECLARE_MUTEX(net_todo_run_mutex);
2969 void netdev_run_todo(void)
2971 struct list_head list = LIST_HEAD_INIT(list);
2973 /* Safe outside the mutex since we only care about entries that
2974 * this CPU put into the queue while under RTNL.
2976 if (list_empty(&net_todo_list))
2979 /* Need to guard against multiple CPUs getting out of order. */
2980 down(&net_todo_run_mutex);
2982 /* Snapshot list, allow later requests */
2983 spin_lock(&net_todo_list_lock);
2984 list_splice_init(&net_todo_list, &list);
2985 spin_unlock(&net_todo_list_lock);
2987 while (!list_empty(&list)) {
2988 struct net_device *dev
2989 = list_entry(list.next, struct net_device, todo_list);
2990 list_del(&dev->todo_list);
2992 switch(dev->reg_state) {
2993 case NETREG_REGISTERING:
2994 netdev_register_sysfs(dev);
2995 dev->reg_state = NETREG_REGISTERED;
2998 case NETREG_UNREGISTERING:
2999 netdev_unregister_sysfs(dev);
3000 dev->reg_state = NETREG_UNREGISTERED;
3002 netdev_wait_allrefs(dev);
3005 BUG_ON(atomic_read(&dev->refcnt));
3006 BUG_TRAP(!dev->ip_ptr);
3007 BUG_TRAP(!dev->ip6_ptr);
3008 BUG_TRAP(!dev->dn_ptr);
3011 /* This must be the very last action;
3012 * after this, 'dev' may point to freed memory.
3014 if (dev->destructor)
3015 dev->destructor(dev);
3019 printk(KERN_ERR "network todo '%s' but state %d\n",
3020 dev->name, dev->reg_state);
3025 up(&net_todo_run_mutex);
3029 * free_netdev - free network device
3032 * This function does the last stage of destroying an allocated device
3033 * interface. The reference to the device object is released.
3034 * If this is the last reference then it will be freed.
3036 void free_netdev(struct net_device *dev)
3038 /* Compatibility with error handling in drivers */
3039 if (dev->reg_state == NETREG_UNINITIALIZED) {
3040 kfree((char *)dev - dev->padded);
3044 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
3045 dev->reg_state = NETREG_RELEASED;
3047 /* will free via class release */
3048 class_device_put(&dev->class_dev);
3051 /* Synchronize with packet receive processing. */
3052 void synchronize_net(void)
3055 synchronize_kernel();
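/*
 * Illustrative sketch, not part of this file: the quiescence pattern
 * synchronize_net() supports.  A subsystem unhooks some receive-path
 * state and waits for packet processing already in flight before freeing
 * it.  The "example_" names are assumptions for illustration only.
 */
#if 0	/* example only, never compiled */
struct example_state {
	int	example_flag;
};

static struct example_state *example_state_ptr;	/* read from the rx path */

static void example_teardown(void)
{
	struct example_state *old = example_state_ptr;

	example_state_ptr = NULL;	/* new packets can no longer find it */
	synchronize_net();		/* wait out receive paths already running */
	kfree(old);			/* nothing can still be using it */
}
#endif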
3059 * unregister_netdevice - remove device from the kernel
3062 * This function shuts down a device interface and removes it
3063 * from the kernel tables. On success 0 is returned, on a failure
3064 * a negative errno code is returned.
3066 * Callers must hold the rtnl semaphore. See the comment at the
3067 * end of Space.c for details about the locking. You may want
3068 * unregister_netdev() instead of this.
3071 int unregister_netdevice(struct net_device *dev)
3073 struct net_device *d, **dp;
3075 BUG_ON(dev_boot_phase);
3079 /* Some devices call this without ever having registered, as part of their initialization unwind. */
3079 if (dev->reg_state == NETREG_UNINITIALIZED) {
3080 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3081 "was registered\n", dev->name, dev);
3085 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3087 /* If device is running, close it first. */
3088 if (dev->flags & IFF_UP)
3091 /* And unlink it from device chain. */
3092 for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
3094 write_lock_bh(&dev_base_lock);
3095 hlist_del(&dev->name_hlist);
3096 hlist_del(&dev->index_hlist);
3097 if (dev_tail == &dev->next)
3100 write_unlock_bh(&dev_base_lock);
3105 printk(KERN_ERR "unregister net_device: '%s' not found\n",
3110 dev->reg_state = NETREG_UNREGISTERING;
3114 #ifdef CONFIG_NET_FASTROUTE
3115 dev_clear_fastroute(dev);
3118 /* Shutdown queueing discipline. */
3122 /* Notify protocols that we are about to destroy
3123 this device. They should clean up all of their state.
3125 notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
3128 * Flush the multicast chain
3130 dev_mc_discard(dev);
3135 /* Notifier chain MUST detach us from master device. */
3136 BUG_TRAP(!dev->master);
3138 free_divert_blk(dev);
3140 /* Finish processing unregister after unlock */
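/*
 * Illustrative sketch, not part of this file: the usual driver-side
 * teardown that reaches unregister_netdevice() above, via the
 * unregister_netdev() wrapper that takes the rtnl semaphore.  By the time
 * unregister_netdev() returns, the todo list has run and the device has
 * reached NETREG_UNREGISTERED, so free_netdev() may release it.
 * "example_dev" is the same assumed variable as in the registration sketch.
 */
#if 0	/* example only, never compiled */
static void __exit example_exit(void)
{
	unregister_netdev(example_dev);	/* rtnl_lock + unregister_netdevice */
	free_netdev(example_dev);	/* release the device structure */
}
#endif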
3147 #ifdef CONFIG_HOTPLUG_CPU
3148 static int dev_cpu_callback(struct notifier_block *nfb,
3149 unsigned long action,
3152 struct sk_buff **list_skb;
3153 struct net_device **list_net;
3154 struct sk_buff *skb;
3155 unsigned int cpu, oldcpu = (unsigned long)ocpu;
3156 struct softnet_data *sd, *oldsd;
3158 if (action != CPU_DEAD)
3161 local_irq_disable();
3162 cpu = smp_processor_id();
3163 sd = &per_cpu(softnet_data, cpu);
3164 oldsd = &per_cpu(softnet_data, oldcpu);
3166 /* Find end of our completion_queue. */
3167 list_skb = &sd->completion_queue;
3169 list_skb = &(*list_skb)->next;
3170 /* Append completion queue from offline CPU. */
3171 *list_skb = oldsd->completion_queue;
3172 oldsd->completion_queue = NULL;
3174 /* Find end of our output_queue. */
3175 list_net = &sd->output_queue;
3177 list_net = &(*list_net)->next_sched;
3178 /* Append output queue from offline CPU. */
3179 *list_net = oldsd->output_queue;
3180 oldsd->output_queue = NULL;
3182 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3185 /* Process offline CPU's input_pkt_queue */
3186 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
3191 #endif /* CONFIG_HOTPLUG_CPU */
3195 * Initialize the DEV module. At boot time this walks the device list and
3196 * unhooks any devices that fail to initialise (normally hardware not
3197 * present) and leaves us with a valid list of present and active devices.
3202 * This is called single-threaded during boot, so there is no need
3203 * to take the rtnl semaphore.
3205 static int __init net_dev_init(void)
3207 int i, rc = -ENOMEM;
3209 BUG_ON(!dev_boot_phase);
3211 if (dev_proc_init())
3214 if (netdev_sysfs_init())
3217 INIT_LIST_HEAD(&ptype_all);
3218 for (i = 0; i < 16; i++)
3219 INIT_LIST_HEAD(&ptype_base[i]);
3221 for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
3222 INIT_HLIST_HEAD(&dev_name_head[i]);
3224 for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
3225 INIT_HLIST_HEAD(&dev_index_head[i]);
3228 * Initialise the packet receive queues.
3231 for (i = 0; i < NR_CPUS; i++) {
3232 struct softnet_data *queue;
3234 queue = &per_cpu(softnet_data, i);
3235 skb_queue_head_init(&queue->input_pkt_queue);
3236 queue->throttle = 0;
3237 queue->cng_level = 0;
3238 queue->avg_blog = 10; /* arbitrary non-zero */
3239 queue->completion_queue = NULL;
3240 INIT_LIST_HEAD(&queue->poll_list);
3241 set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
3242 queue->backlog_dev.weight = weight_p;
3243 queue->backlog_dev.poll = process_backlog;
3244 atomic_set(&queue->backlog_dev.refcnt, 1);
3247 #ifdef OFFLINE_SAMPLE
3248 samp_timer.expires = jiffies + (10 * HZ);
3249 add_timer(&samp_timer);
3254 open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
3255 open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
3257 hotcpu_notifier(dev_cpu_callback, 0);
3265 subsys_initcall(net_dev_init);
3267 EXPORT_SYMBOL(__dev_get);
3268 EXPORT_SYMBOL(__dev_get_by_flags);
3269 EXPORT_SYMBOL(__dev_get_by_index);
3270 EXPORT_SYMBOL(__dev_get_by_name);
3271 EXPORT_SYMBOL(__dev_remove_pack);
3272 EXPORT_SYMBOL(__skb_linearize);
3273 EXPORT_SYMBOL(call_netdevice_notifiers);
3274 EXPORT_SYMBOL(dev_add_pack);
3275 EXPORT_SYMBOL(dev_alloc_name);
3276 EXPORT_SYMBOL(dev_close);
3277 EXPORT_SYMBOL(dev_get_by_flags);
3278 EXPORT_SYMBOL(dev_get_by_index);
3279 EXPORT_SYMBOL(dev_get_by_name);
3280 EXPORT_SYMBOL(dev_getbyhwaddr);
3281 EXPORT_SYMBOL(dev_ioctl);
3282 EXPORT_SYMBOL(dev_new_index);
3283 EXPORT_SYMBOL(dev_open);
3284 EXPORT_SYMBOL(dev_queue_xmit);
3285 EXPORT_SYMBOL(dev_queue_xmit_nit);
3286 EXPORT_SYMBOL(dev_remove_pack);
3287 EXPORT_SYMBOL(dev_set_allmulti);
3288 EXPORT_SYMBOL(dev_set_promiscuity);
3289 EXPORT_SYMBOL(free_netdev);
3290 EXPORT_SYMBOL(netdev_boot_setup_check);
3291 EXPORT_SYMBOL(netdev_set_master);
3292 EXPORT_SYMBOL(netdev_state_change);
3293 EXPORT_SYMBOL(netif_receive_skb);
3294 EXPORT_SYMBOL(netif_rx);
3295 EXPORT_SYMBOL(register_gifconf);
3296 EXPORT_SYMBOL(register_netdevice);
3297 EXPORT_SYMBOL(register_netdevice_notifier);
3298 EXPORT_SYMBOL(skb_checksum_help);
3299 EXPORT_SYMBOL(synchronize_net);
3300 EXPORT_SYMBOL(unregister_netdevice);
3301 EXPORT_SYMBOL(unregister_netdevice_notifier);
3303 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
3304 EXPORT_SYMBOL(br_handle_frame_hook);
3306 /* for 801q VLAN support */
3307 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
3308 EXPORT_SYMBOL(dev_change_flags);
3311 EXPORT_SYMBOL(dev_load);
3313 #ifdef CONFIG_NET_HW_FLOWCONTROL
3314 EXPORT_SYMBOL(netdev_dropping);
3315 EXPORT_SYMBOL(netdev_fc_xoff);
3316 EXPORT_SYMBOL(netdev_register_fc);
3317 EXPORT_SYMBOL(netdev_unregister_fc);
3319 #ifdef CONFIG_NET_FASTROUTE
3320 EXPORT_SYMBOL(netdev_fastroute);
3321 EXPORT_SYMBOL(netdev_fastroute_obstacles);
3324 EXPORT_PER_CPU_SYMBOL(softnet_data);