/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/divert.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#ifdef CONFIG_NET_RADIO
#include <linux/wireless.h>		/* Note : will define WIRELESS_EXT */
#include <net/iw_handler.h>
#endif	/* CONFIG_NET_RADIO */
#include <asm/current.h>
/* This define, if set, will randomly drop a packet when congestion
 * is more than moderate.  It helps fairness in the multi-interface
 * case when one of them is a hog, but it kills performance for the
 * single interface case so it is off now by default.
 */
#undef RAND_LIE

/* Setting this will sample the queue lengths and thus congestion
 * via a timer instead of as each packet is received.
 */
#undef OFFLINE_SAMPLE
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */

static spinlock_t ptype_lock = SPIN_LOCK_UNLOCKED;
static struct list_head ptype_base[16];	/* 16 way hashed list */
static struct list_head ptype_all;	/* Taps */
#ifdef OFFLINE_SAMPLE
static void sample_queue(unsigned long dummy);
static struct timer_list samp_timer = TIMER_INITIALIZER(sample_queue, 0, 0);
#endif
/*
 * The @dev_base list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
struct net_device *dev_base;
struct net_device **dev_tail = &dev_base;
rwlock_t dev_base_lock = RW_LOCK_UNLOCKED;

EXPORT_SYMBOL(dev_base);
EXPORT_SYMBOL(dev_base_lock);
#define NETDEV_HASHBITS	8
static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];

static inline struct hlist_head *dev_name_hash(const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
}

static inline struct hlist_head *dev_index_hash(int ifindex)
{
	return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
}
/*
 *	Our notifier list
 */

static struct notifier_block *netdev_chain;

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */
DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, };

#ifdef CONFIG_SYSFS
extern int netdev_sysfs_init(void);
extern int netdev_register_sysfs(struct net_device *);
extern void netdev_unregister_sysfs(struct net_device *);
#else
#define netdev_sysfs_init()		(0)
#define netdev_register_sysfs(dev)	(0)
#define netdev_unregister_sysfs(dev)	do { } while(0)
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/
/*
 *	For efficiency
 */

int netdev_nit;

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if the protocol handler that mangles packets
 *	is first on the list, it cannot sense that the packet is cloned
 *	and should be copied-on-write, so it will change it and subsequent
 *	readers will get a broken packet.
 *							--ANK (980803)
 */
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL)) {
		netdev_nit++;
		list_add_rcu(&pt->list, &ptype_all);
	} else {
		hash = ntohs(pt->type) & 15;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
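/*
 * Example (illustrative sketch, not part of the original file): a
 * protocol module that wants to see every incoming frame registers a
 * tap roughly like this; my_pkt_rcv and my_pt are hypothetical names.
 * The handler is responsible for freeing the skb it is given:
 *
 *	static int my_pkt_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt)
 *	{
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_pt = {
 *		.type = __constant_htons(ETH_P_ALL),
 *		.func = my_pkt_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);
 *	...
 *	dev_remove_pack(&my_pt);
 */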
extern void linkwatch_run_queue(void);

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL)) {
		netdev_nit--;
		head = &ptype_all;
	} else
		head = &ptype_base[ntohs(pt->type) & 15];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/
/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	add boot time settings for various devices.
 */
int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strcpy(s[i].name, name);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
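/*
 * Example (illustrative): a boot command line option such as
 *
 *	netdev=5,0x340,0xd0000,0xd4000,eth1
 *
 * is parsed by get_options() above into irq 5, base_addr 0x340,
 * mem_start 0xd0000 and mem_end 0xd4000, recorded for the device
 * named "eth1".
 */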
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/
/**
 *	__dev_get_by_name	- find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}
/**
 *	dev_get_by_name		- find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
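/*
 * Example (illustrative, not part of the original file): a caller that
 * needs the device only briefly looks it up, uses it and drops the
 * reference it was given; "eth0" is just a sample name.
 *
 *	struct net_device *dev = dev_get_by_name("eth0");
 *
 *	if (dev) {
 *		printk(KERN_INFO "%s has ifindex %d\n",
 *		       dev->name, dev->ifindex);
 *		dev_put(dev);
 *	}
 */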
/*
   Return value is changed to int to prevent illegal usage in future.
   It is still legal to use to check for device existence.

   Users should understand that the result returned by this function
   is meaningless unless it was issued under the rtnl semaphore.
 */

/**
 *	dev_get - test if a device exists
 *	@name:	name to test for
 *
 *	Test if a name exists. Returns true if the name is found. In order
 *	to be sure the name is not allocated or removed during the test the
 *	caller must hold the rtnl semaphore.
 *
 *	This function exists only for back compatibility with older
 *	drivers.
 */
int __dev_get(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	read_unlock(&dev_base_lock);
	return dev != NULL;
}
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for (dev = dev_base; dev; dev = dev->next)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			break;
	return dev;
}
struct net_device *__dev_getfirstbyhwtype(unsigned short type)
{
	struct net_device *dev;

	for (dev = dev_base; dev; dev = dev->next)
		if (dev->type == type)
			break;
	return dev;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags - find any device with given flags
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_flags(if_flags, mask);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
/**
 *	__dev_get_by_flags - find any device with given flags
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The caller must hold either
 *	the RTNL semaphore or @dev_base_lock.
 */

struct net_device *__dev_get_by_flags(unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev;

	for (dev = dev_base; dev != NULL; dev = dev->next) {
		if (((dev->flags ^ if_flags) & mask) == 0)
			break;
	}
	return dev;
}
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.
 */
int dev_valid_name(const char *name)
{
	return !(*name == '\0'
		 || !strcmp(name, ".")
		 || !strcmp(name, "..")
		 || strchr(name, '/'));
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. Not efficient for many devices, not called a lot. The caller
 *	must hold the dev_base or rtnl lock while allocating the name and
 *	adding the device in order to avoid duplicates. Returns the number
 *	of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	int i = 0;
	char buf[IFNAMSIZ];
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for (d = dev_base; d; d = d->next) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, sizeof(buf), name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, sizeof(buf), name, i);
	if (!__dev_get_by_name(buf)) {
		strlcpy(dev->name, buf, IFNAMSIZ);
		return i;
	}

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
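/*
 * Example (illustrative): a driver that wants the next free "ethN"
 * unit calls, with the rtnl semaphore held:
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *
 * On success err is the assigned unit number and dev->name contains
 * the expanded name (e.g. "eth2"); on failure a negative errno is
 * returned and dev->name is left untouched.
 */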
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device; a format string such as "eth%d"
 *	can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	int err = 0;

	ASSERT_RTNL();

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

	err = class_device_rename(&dev->class_dev, dev->name);
	if (!err) {
		hlist_del(&dev->name_hlist);
		hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
		notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
	}

	return err;
}
/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
/**
 *	dev_load	- load a network module
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

static int default_rebuild_header(struct sk_buff *skb)
{
	printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
	       skb->dev ? skb->dev->name : "NULL!!!");
	kfree_skb(skb);
	return 1;
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);
	if (dev->open) {
		ret = dev->open(dev);
		if (ret)
			clear_bit(__LINK_STATE_START, &dev->state);
	}

	/*
	 *	If it went open OK then:
	 */
	if (!ret) {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_mc_upload(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return ret;
}
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);

	dev_deactivate(dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running(),
	 * and wait when poll really will happen. Actually, the best place
	 * for this is inside dev->stop() after device stopped its irq
	 * engine, but this requires more changes in devices. */

	smp_mb__after_clear_bit(); /* Commit netif_running(). */
	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
		/* No hurry. */
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(1);
	}

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);

	return 0;
}
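/*
 * Example (illustrative): bringing an interface up and later down
 * again from kernel code; both calls expect the rtnl semaphore held.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	...
 *	dev_close(dev);
 *	rtnl_unlock();
 */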
/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	int err;

	rtnl_lock();
	err = notifier_chain_register(&netdev_chain, nb);
	if (!err) {
		for (dev = dev_base; dev; dev = dev->next) {
			nb->notifier_call(nb, NETDEV_REGISTER, dev);

			if (dev->flags & IFF_UP)
				nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}
	rtnl_unlock();
	return err;
}
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netdev_chain, nb);
}
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@v:   pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, void *v)
{
	return notifier_call_chain(&netdev_chain, val, v);
}
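/*
 * Example (illustrative sketch): a subsystem that wants to react to
 * interfaces coming up; my_netdev_event and my_notifier are
 * hypothetical names.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call	= my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_notifier);
 *
 * Registration replays NETDEV_REGISTER and NETDEV_UP for devices that
 * already exist, as described above.
 */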
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	net_timestamp(&skb->stamp);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb2->mac.raw = skb2->data;

			if (skb2->nh.raw < skb2->data ||
			    skb2->nh.raw > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb2->nh.raw = skb2->data;
			}

			skb2->h.raw = skb2->nh.raw;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype);
		}
	}
	rcu_read_unlock();
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff **pskb, int inward)
{
	unsigned int csum;
	int ret = 0, offset = (*pskb)->h.raw - (*pskb)->data;

	if (inward) {
		(*pskb)->ip_summed = CHECKSUM_NONE;
		goto out;
	}

	if (skb_cloned(*pskb)) {
		ret = pskb_expand_head(*pskb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	if (offset > (int)(*pskb)->len)
		BUG();
	csum = skb_checksum(*pskb, offset, (*pskb)->len - offset, 0);

	offset = (*pskb)->tail - (*pskb)->h.raw;
	if (offset <= 0)
		BUG();
	if ((*pskb)->csum + 2 > offset)
		BUG();

	*(u16 *)((*pskb)->h.raw + (*pskb)->csum) = csum_fold(csum);
	(*pskb)->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
#ifdef CONFIG_HIGHMEM
/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (skb_shinfo(skb)->frags[i].page >= highmem_start_page)
			return 1;

	return 0;
}
#else
#define illegal_highdma(dev, skb)	(0)
#endif
extern void skb_release_data(struct sk_buff *);

/* Keep head the same: replace data */
int __skb_linearize(struct sk_buff *skb, int gfp_mask)
{
	unsigned int size;
	u8 *data;
	long offset;
	struct skb_shared_info *ninfo;
	int headerlen = skb->data - skb->head;
	int expand = (skb->tail + skb->data_len) - skb->end;

	if (skb_shared(skb))
		BUG();

	if (expand <= 0)
		expand = 0;

	size = skb->end - skb->head + expand;
	size = SKB_DATA_ALIGN(size);
	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		return -ENOMEM;

	/* Copy entire thing */
	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
		BUG();

	/* Set up shinfo */
	ninfo = (struct skb_shared_info *)(data + size);
	atomic_set(&ninfo->dataref, 1);
	ninfo->tso_size = skb_shinfo(skb)->tso_size;
	ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
	ninfo->nr_frags = 0;
	ninfo->frag_list = NULL;

	/* Offset between the two in bytes */
	offset = data - skb->head;

	/* Free old data. */
	skb_release_data(skb);

	skb->head = data;
	skb->end  = data + size;

	/* Set up new pointers */
	skb->h.raw   += offset;
	skb->nh.raw  += offset;
	skb->mac.raw += offset;
	skb->tail    += offset;
	skb->data    += offset;

	/* We are no longer a clone, even if we were. */
	skb->cloned    = 0;

	skb->tail     += skb->data_len;
	skb->data_len  = 0;
	return 0;
}
#define HARD_TX_LOCK(dev, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		spin_lock(&dev->xmit_lock);		\
		dev->xmit_lock_owner = cpu;		\
	}						\
}

#define HARD_TX_UNLOCK(dev) {				\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		dev->xmit_lock_owner = -1;		\
		spin_unlock(&dev->xmit_lock);		\
	}						\
}

static inline void qdisc_run(struct net_device *dev)
{
	while (!netif_queue_stopped(dev) &&
	       qdisc_restart(dev) < 0)
		/* NOTHING */;
}
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 */

int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct Qdisc *q;
	int rc = -ENOMEM;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb, GFP_ATOMIC))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb, GFP_ATOMIC))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_HW &&
	    (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
	     (!(dev->features & NETIF_F_IP_CSUM) ||
	      skb->protocol != htons(ETH_P_IP))))
		if (skb_checksum_help(&skb, 0))
			goto out_kfree_skb;

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	local_bh_disable();

	/* Updates of qdisc are serialized by queue_lock.
	 * The struct Qdisc which is pointed to by qdisc is now a
	 * rcu structure - it may be accessed without acquiring
	 * a lock (but the structure may be stale.) The freeing of the
	 * qdisc will be deferred until it's known that there are no
	 * more references to it.
	 *
	 * If the qdisc has an enqueue function, we still need to
	 * hold the queue_lock before calling it, since queue_lock
	 * also serializes access to the device queue.
	 */
	q = rcu_dereference(dev->qdisc);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		/* Grab device queue */
		spin_lock(&dev->queue_lock);

		rc = q->enqueue(skb, q);

		qdisc_run(dev);

		spin_unlock(&dev->queue_lock);
		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that xmit_lock protection is necessary here.
	   (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible that they rely on the protection
	   made by us here.

	   Check this and take the lock. It is not prone to deadlocks.
	   Or take the noqueue qdisc; it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (dev->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, cpu);

			if (!netif_queue_stopped(dev)) {
				if (netdev_nit)
					dev_queue_xmit_nit(skb, dev);

				rc = 0;
				if (!dev->hard_start_xmit(skb, dev)) {
					HARD_TX_UNLOCK(dev);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	local_bh_enable();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	local_bh_enable();
	return rc;
}
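/*
 * Example (illustrative sketch): transmitting a raw frame from kernel
 * code; "frame" and "len" stand for caller-supplied data. The buffer
 * is consumed by dev_queue_xmit() whether or not it is transmitted.
 *
 *	struct sk_buff *skb = alloc_skb(len + LL_RESERVED_SPACE(dev),
 *					GFP_ATOMIC);
 *
 *	if (skb) {
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *		memcpy(skb_put(skb, len), frame, len);
 *		skb->dev = dev;
 *		skb->protocol = htons(ETH_P_IP);
 *		dev_queue_xmit(skb);
 *	}
 */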
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog = 300;
int weight_p = 64;            /* old backlog weight */
/* These numbers are selected based on intuition and some
 * experimentation, if you have a more scientific way of doing this
 * please go ahead and fix things.
 */
int no_cong_thresh = 10;
int no_cong = 20;
int lo_cong = 100;
int mod_cong = 290;

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
#ifdef CONFIG_NET_HW_FLOWCONTROL
atomic_t netdev_dropping = ATOMIC_INIT(0);
static unsigned long netdev_fc_mask = 1;
unsigned long netdev_fc_xoff;
spinlock_t netdev_fc_lock = SPIN_LOCK_UNLOCKED;

static struct
{
	void (*stimul)(struct net_device *);
	struct net_device *dev;
} netdev_fc_slots[BITS_PER_LONG];

int netdev_register_fc(struct net_device *dev,
		       void (*stimul)(struct net_device *dev))
{
	int bit = 0;
	unsigned long flags;

	spin_lock_irqsave(&netdev_fc_lock, flags);
	if (netdev_fc_mask != ~0UL) {
		bit = ffz(netdev_fc_mask);
		netdev_fc_slots[bit].stimul = stimul;
		netdev_fc_slots[bit].dev = dev;
		set_bit(bit, &netdev_fc_mask);
		clear_bit(bit, &netdev_fc_xoff);
	}
	spin_unlock_irqrestore(&netdev_fc_lock, flags);
	return bit;
}

void netdev_unregister_fc(int bit)
{
	unsigned long flags;

	spin_lock_irqsave(&netdev_fc_lock, flags);
	if (bit > 0) {
		netdev_fc_slots[bit].stimul = NULL;
		netdev_fc_slots[bit].dev = NULL;
		clear_bit(bit, &netdev_fc_mask);
		clear_bit(bit, &netdev_fc_xoff);
	}
	spin_unlock_irqrestore(&netdev_fc_lock, flags);
}

static void netdev_wakeup(void)
{
	unsigned long xoff;

	spin_lock(&netdev_fc_lock);
	xoff = netdev_fc_xoff;
	netdev_fc_xoff = 0;
	while (xoff) {
		int i = ffz(~xoff);
		xoff &= ~(1 << i);
		netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
	}
	spin_unlock(&netdev_fc_lock);
}
#endif
static void get_sample_stats(int cpu)
{
#ifdef RAND_LIE
	unsigned long rd;
	int rq;
#endif
	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
	int blog = sd->input_pkt_queue.qlen;
	int avg_blog = sd->avg_blog;

	avg_blog = (avg_blog >> 1) + (blog >> 1);

	if (avg_blog > mod_cong) {
		/* Above moderate congestion levels. */
		sd->cng_level = NET_RX_CN_HIGH;
#ifdef RAND_LIE
		rd = net_random();
		rq = rd % netdev_max_backlog;
		if (rq < avg_blog) /* unlucky bastard */
			sd->cng_level = NET_RX_DROP;
#endif
	} else if (avg_blog > lo_cong) {
		sd->cng_level = NET_RX_CN_MOD;
#ifdef RAND_LIE
		rd = net_random();
		rq = rd % netdev_max_backlog;
		if (rq < avg_blog) /* unlucky bastard */
			sd->cng_level = NET_RX_CN_HIGH;
#endif
	} else if (avg_blog > no_cong)
		sd->cng_level = NET_RX_CN_LOW;
	else  /* no congestion */
		sd->cng_level = NET_RX_SUCCESS;

	sd->avg_blog = avg_blog;
}
#ifdef OFFLINE_SAMPLE
static void sample_queue(unsigned long dummy)
{
	/* 10 ms or 1 ms -- i don't care -- JHS */
	int next_tick = 1;
	int cpu = smp_processor_id();

	get_sample_stats(cpu);
	next_tick += jiffies;
	mod_timer(&samp_timer, next_tick);
}
#endif
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_CN_LOW   (low congestion)
 *	NET_RX_CN_MOD   (moderate congestion)
 *	NET_RX_CN_HIGH  (high congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	int this_cpu;
	struct softnet_data *queue;
	unsigned long flags;

#ifdef CONFIG_NETPOLL
	if (skb->dev->netpoll_rx && netpoll_rx(skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
#endif

	if (!skb->stamp.tv_sec)
		net_timestamp(&skb->stamp);

	/*
	 * The code is rearranged so that the path is the most
	 * short when CPU is congested, but is still operating.
	 */
	local_irq_save(flags);
	this_cpu = smp_processor_id();
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
			if (queue->throttle)
				goto drop;

enqueue:
			dev_hold(skb->dev);
			__skb_queue_tail(&queue->input_pkt_queue, skb);
#ifndef OFFLINE_SAMPLE
			get_sample_stats(this_cpu);
#endif
			local_irq_restore(flags);
			return queue->cng_level;
		}

		if (queue->throttle) {
			queue->throttle = 0;
#ifdef CONFIG_NET_HW_FLOWCONTROL
			if (atomic_dec_and_test(&netdev_dropping))
				netdev_wakeup();
#endif
		}

		netif_rx_schedule(&queue->backlog_dev);
		goto enqueue;
	}

	if (!queue->throttle) {
		queue->throttle = 1;
		__get_cpu_var(netdev_rx_stat).throttled++;
#ifdef CONFIG_NET_HW_FLOWCONTROL
		atomic_inc(&netdev_dropping);
#endif
	}

drop:
	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
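/*
 * Example (illustrative sketch): the usual interrupt-time receive path
 * in a driver; rx_buf and pkt_len stand for the frame the hardware
 * delivered. eth_type_trans() sets skb->dev and skb->pkt_type and
 * returns the protocol before the buffer is posted here.
 *
 *	struct sk_buff *skb = dev_alloc_skb(pkt_len + 2);
 *
 *	if (skb) {
 *		skb_reserve(skb, 2);	(align the IP header)
 *		memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */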
static __inline__ void skb_bond(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (dev->master) {
		skb->real_dev = skb->dev;
		skb->dev = dev->master;
	}
}
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			BUG_TRAP(!atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct net_device *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct net_device *dev = head;
			head = head->next_sched;

			smp_mb__before_clear_bit();
			clear_bit(__LINK_STATE_SCHED, &dev->state);

			if (spin_trylock(&dev->queue_lock)) {
				qdisc_run(dev);
				spin_unlock(&dev->queue_lock);
			} else {
				netif_schedule(dev);
			}
		}
	}
}
static __inline__ int deliver_skb(struct sk_buff *skb,
				  struct packet_type *pt_prev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev);
}

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb);

static __inline__ int handle_bridge(struct sk_buff **pskb,
				    struct packet_type **pt_prev, int *ret)
{
	struct net_bridge_port *port;

	if ((*pskb)->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference((*pskb)->dev->br_port)) == NULL)
		return 0;

	if (*pt_prev) {
		*ret = deliver_skb(*pskb, *pt_prev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, pskb);
}
#else
#define handle_bridge(skb, pt_prev, ret)	(0)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise we pay some useless
 * instructions (a compare and 2 stores) even when we don't have it
 * on but do have CONFIG_NET_CLS_ACT.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
int ing_filter(struct sk_buff *skb)
{
	struct Qdisc *q;
	struct net_device *dev = skb->dev;
	int result = TC_ACT_OK;

	if (dev->qdisc_ingress) {
		__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
		if (MAX_RED_LOOP < ttl++) {
			printk("Redir loop detected Dropping packet (%s->%s)\n",
			       skb->input_dev ? skb->input_dev->name : "??",
			       skb->dev->name);
			return TC_ACT_SHOT;
		}

		skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);

		skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
		if (NULL == skb->input_dev) {
			skb->input_dev = skb->dev;
			printk("ing_filter: fixed %s out %s\n",
			       skb->input_dev->name, skb->dev->name);
		}
		spin_lock(&dev->ingress_lock);
		if ((q = dev->qdisc_ingress) != NULL)
			result = q->enqueue(skb, q);
		spin_unlock(&dev->ingress_lock);
	}

	return result;
}
#endif
int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	int ret = NET_RX_DROP;
	unsigned short type;

#ifdef CONFIG_NETPOLL
	if (skb->dev->netpoll_rx && skb->dev->poll && netpoll_rx(skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
#endif

	if (!skb->stamp.tv_sec)
		net_timestamp(&skb->stamp);

	skb_bond(skb);

	__get_cpu_var(netdev_rx_stat).total++;

	skb->h.raw = skb->nh.raw = skb->data;
	skb->mac_len = skb->nh.raw - skb->mac.raw;

	pt_prev = NULL;

	rcu_read_lock();

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	if (pt_prev) {
		ret = deliver_skb(skb, pt_prev);
		pt_prev = NULL; /* no one else should process this after */
	} else {
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	ret = ing_filter(skb);

	if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) {
		kfree_skb(skb);
		goto out;
	}

	skb->tc_verd = 0;
ncls:
#endif

	handle_diverter(skb);

	if (handle_bridge(&skb, &pt_prev, &ret))
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
		if (ptype->type == type &&
		    (!ptype->dev || ptype->dev == skb->dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}
static int process_backlog(struct net_device *backlog_dev, int *budget)
{
	int work = 0;
	int quota = min(backlog_dev->quota, *budget);
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;

	for (;;) {
		struct sk_buff *skb;
		struct net_device *dev;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb)
			goto job_done;
		local_irq_enable();

		dev = skb->dev;

		netif_receive_skb(skb);

		dev_put(dev);

		work++;

		if (work >= quota || jiffies - start_time > 1)
			break;

#ifdef CONFIG_NET_HW_FLOWCONTROL
		if (queue->throttle &&
		    queue->input_pkt_queue.qlen < no_cong_thresh) {
			queue->throttle = 0;
			if (atomic_dec_and_test(&netdev_dropping)) {
				queue->throttle = 1;
				netdev_wakeup();
			}
		}
#endif
	}

	backlog_dev->quota -= work;
	*budget -= work;
	return -1;

job_done:
	backlog_dev->quota -= work;
	*budget -= work;

	list_del(&backlog_dev->poll_list);
	smp_mb__before_clear_bit();
	netif_poll_enable(backlog_dev);

	if (queue->throttle) {
		queue->throttle = 0;
#ifdef CONFIG_NET_HW_FLOWCONTROL
		if (atomic_dec_and_test(&netdev_dropping))
			netdev_wakeup();
#endif
	}
	local_irq_enable();
	return 0;
}
static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;
	int budget = netdev_max_backlog;

	local_irq_disable();

	while (!list_empty(&queue->poll_list)) {
		struct net_device *dev;

		if (budget <= 0 || jiffies - start_time > 1)
			goto softnet_break;

		local_irq_enable();

		dev = list_entry(queue->poll_list.next,
				 struct net_device, poll_list);

		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
			local_irq_disable();
			list_del(&dev->poll_list);
			list_add_tail(&dev->poll_list, &queue->poll_list);
			if (dev->quota < 0)
				dev->quota += dev->weight;
			else
				dev->quota = dev->weight;
		} else {
			dev_put(dev);
			local_irq_disable();
		}
	}
out:
	local_irq_enable();
	return;

softnet_break:
	__get_cpu_var(netdev_rx_stat).time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}
static gifconf_func_t * gifconf_list [NPROTO];

/**
 *	register_gifconf	-	register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}
/*
 *	Map an interface index to its name (SIOCGIFNAME)
 */

/*
 *	We need this ioctl for efficient implementation of the
 *	if_indextoname() function required by the IPv6 API.  Without
 *	it, we would have to search all the interfaces to find a
 *	matching interface.
 */

static int dev_ifname(struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 *	Fetch the caller's info block.
	 */
	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifr.ifr_ifindex);
	if (!dev) {
		read_unlock(&dev_base_lock);
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	read_unlock(&dev_base_lock);

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}
/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size eventually, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	/*
	 *	Fetch the caller's info block.
	 */
	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 *	Loop over the interfaces, and write an info block for each.
	 */
	total = 0;
	for (dev = dev_base; dev; dev = dev->next) {
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
	}

	/*
	 *	All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 *	Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}
#ifdef CONFIG_PROC_FS
/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */
static __inline__ struct net_device *dev_get_idx(loff_t pos)
{
	struct net_device *dev;
	loff_t i;

	for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next);

	return i == pos ? dev : NULL;
}

void *dev_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&dev_base_lock);
	return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
}

void dev_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&dev_base_lock);
}
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	if (dev->get_stats) {
		struct net_device_stats *stats = dev->get_stats(dev);

		seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
				"%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
			   dev->name, stats->rx_bytes, stats->rx_packets,
			   stats->rx_errors,
			   stats->rx_dropped + stats->rx_missed_errors,
			   stats->rx_fifo_errors,
			   stats->rx_length_errors + stats->rx_over_errors +
			     stats->rx_crc_errors + stats->rx_frame_errors,
			   stats->rx_compressed, stats->multicast,
			   stats->tx_bytes, stats->tx_packets,
			   stats->tx_errors, stats->tx_dropped,
			   stats->tx_fifo_errors, stats->collisions,
			   stats->tx_carrier_errors +
			     stats->tx_aborted_errors +
			     stats->tx_window_errors +
			     stats->tx_heartbeat_errors,
			   stats->tx_compressed);
	} else
		seq_printf(seq, "%6s: No statistics available.\n", dev->name);
}
/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}
static struct netif_rx_stats *softnet_get_online(loff_t *pos)
{
	struct netif_rx_stats *rc = NULL;

	while (*pos < NR_CPUS)
		if (cpu_online(*pos)) {
			rc = &per_cpu(netdev_rx_stat, *pos);
			break;
		} else
			++*pos;
	return rc;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct netif_rx_stats *s = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   s->total, s->dropped, s->time_squeeze, s->throttled,
		   s->fastroute_hit, s->fastroute_success, s->fastroute_defer,
		   s->fastroute_deferred_out,
#if 0
		   s->fastroute_latency_reduction
#else
		   s->cng_levels[0]
#endif
		   );
	return 0;
}
static struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}

static struct file_operations dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = dev_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

static struct file_operations softnet_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = softnet_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
#ifdef WIRELESS_EXT
extern int wireless_proc_init(void);
#else
#define wireless_proc_init() 0
#endif

static int __init dev_proc_init(void)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
		goto out_dev;
	if (wireless_proc_init())
		goto out_softnet;
	rc = 0;
out:
	return rc;
out_softnet:
	proc_net_remove("softnet_stat");
out_dev:
	proc_net_remove("dev");
	goto out;
}
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */
/**
 *	netdev_set_master	-	set up master/slave pair
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success the reference counts
 *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
 *	function returns zero.
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;

	synchronize_net();

	if (old)
		dev_put(old);

	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}
/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 */
void dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	dev->flags |= IFF_PROMISC;
	if ((dev->promiscuity += inc) == 0)
		dev->flags &= ~IFF_PROMISC;
	if (dev->flags ^ old_flags) {
		dev_mc_upload(dev);
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
							       "left");
	}
}
/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all multicast frames. Once it hits zero the device reverts back to
 *	normal filtering operation. A negative @inc value is used to drop the
 *	counter when releasing a resource needing all multicasts.
 */

void dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	dev->flags |= IFF_ALLMULTI;
	if ((dev->allmulti += inc) == 0)
		dev->flags &= ~IFF_ALLMULTI;
	if (dev->flags ^ old_flags)
		dev_mc_upload(dev);
}
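/*
 * Example (illustrative): a component such as a packet capture module
 * takes the interface into promiscuous mode while it is active and
 * releases it afterwards; the counts nest across multiple users.
 *
 *	dev_set_promiscuity(dev, 1);
 *	...
 *	dev_set_promiscuity(dev, -1);
 */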
unsigned dev_get_flags(const struct net_device *dev)
{
	unsigned flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev) && netif_carrier_ok(dev))
		flags |= IFF_RUNNING;

	return flags;
}
int dev_change_flags(struct net_device *dev, unsigned flags)
{
	int ret;
	int old_flags = dev->flags;

	/*
	 *	Set the flags on our device.
	 */
	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */
	dev_mc_upload(dev);

	/*
	 *	Have we downed the interface. We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */
	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);

		if (!ret)
			dev_mc_upload(dev);
	}

	if (dev->flags & IFF_UP &&
	    ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
					  IFF_VOLATILE)))
		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? +1 : -1;
		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	   is important. Some (broken) drivers set IFF_PROMISC, when
	   IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

	if (old_flags ^ dev->flags)
		rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);

	return ret;
}
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/*	MTU must be positive.	 */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
	if (dev->change_mtu)
		err = dev->change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;
	if (!err && dev->flags & IFF_UP)
		notifier_call_chain(&netdev_chain,
				    NETDEV_CHANGEMTU, dev);
	return err;
}
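/*
 * Example (illustrative): changing the MTU from kernel code, with the
 * rtnl semaphore held; 1492 is just a sample value.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 1492);
 *	rtnl_unlock();
 */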
/*
 *	Perform the SIOCxIFxxx calls.
 */
static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
		case SIOCGIFFLAGS:	/* Get interface flags */
			ifr->ifr_flags = dev_get_flags(dev);
			return 0;

		case SIOCSIFFLAGS:	/* Set interface flags */
			return dev_change_flags(dev, ifr->ifr_flags);

		case SIOCGIFMETRIC:	/* Get the metric on the interface
					   (currently unused) */
			ifr->ifr_metric = 0;
			return 0;

		case SIOCSIFMETRIC:	/* Set the metric on the interface
					   (currently unused) */
			return -EOPNOTSUPP;

		case SIOCGIFMTU:	/* Get the MTU of a device */
			ifr->ifr_mtu = dev->mtu;
			return 0;

		case SIOCSIFMTU:	/* Set the MTU of a device */
			return dev_set_mtu(dev, ifr->ifr_mtu);

		case SIOCGIFHWADDR:
			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
			ifr->ifr_hwaddr.sa_family = dev->type;
			return 0;

		case SIOCSIFHWADDR:
			if (!dev->set_mac_address)
				return -EOPNOTSUPP;
			if (ifr->ifr_hwaddr.sa_family != dev->type)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			err = dev->set_mac_address(dev, &ifr->ifr_hwaddr);
			if (!err)
				notifier_call_chain(&netdev_chain,
						    NETDEV_CHANGEADDR, dev);
			return err;

		case SIOCSIFHWBROADCAST:
			if (ifr->ifr_hwaddr.sa_family != dev->type)
				return -EINVAL;
			memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
			notifier_call_chain(&netdev_chain,
					    NETDEV_CHANGEADDR, dev);
			return 0;

		case SIOCGIFMAP:
			ifr->ifr_map.mem_start = dev->mem_start;
			ifr->ifr_map.mem_end   = dev->mem_end;
			ifr->ifr_map.base_addr = dev->base_addr;
			ifr->ifr_map.irq       = dev->irq;
			ifr->ifr_map.dma       = dev->dma;
			ifr->ifr_map.port      = dev->if_port;
			return 0;

		case SIOCSIFMAP:
			if (dev->set_config) {
				if (!netif_device_present(dev))
					return -ENODEV;
				return dev->set_config(dev, &ifr->ifr_map);
			}
			return -EOPNOTSUPP;

		case SIOCADDMULTI:
			if (!dev->set_multicast_list ||
			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
					  dev->addr_len, 1);

		case SIOCDELMULTI:
			if (!dev->set_multicast_list ||
			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
					     dev->addr_len, 1);

		case SIOCGIFINDEX:
			ifr->ifr_ifindex = dev->ifindex;
			return 0;

		case SIOCGIFTXQLEN:
			ifr->ifr_qlen = dev->tx_queue_len;
			return 0;

		case SIOCSIFTXQLEN:
			if (ifr->ifr_qlen < 0)
				return -EINVAL;
			dev->tx_queue_len = ifr->ifr_qlen;
			return 0;

		case SIOCSIFNAME:
			ifr->ifr_newname[IFNAMSIZ-1] = '\0';
			return dev_change_name(dev, ifr->ifr_newname);

		/*
		 *	Unknown or private ioctl
		 */
		default:
			if ((cmd >= SIOCDEVPRIVATE &&
			     cmd <= SIOCDEVPRIVATE + 15) ||
			    cmd == SIOCBONDENSLAVE ||
			    cmd == SIOCBONDRELEASE ||
			    cmd == SIOCBONDSETHWADDR ||
			    cmd == SIOCBONDSLAVEINFOQUERY ||
			    cmd == SIOCBONDINFOQUERY ||
			    cmd == SIOCBONDCHANGEACTIVE ||
			    cmd == SIOCGMIIPHY ||
			    cmd == SIOCGMIIREG ||
			    cmd == SIOCSMIIREG ||
			    cmd == SIOCBRADDIF ||
			    cmd == SIOCBRDELIF ||
			    cmd == SIOCWANDEV) {
				err = -EOPNOTSUPP;
				if (dev->do_ioctl) {
					if (netif_device_present(dev))
						err = dev->do_ioctl(dev, ifr,
								    cmd);
					else
						err = -ENODEV;
				}
			} else
				err = -EINVAL;
	}
	return err;
}
2561 * This function handles all "interface"-type I/O control requests. The actual
2562 * 'doing' part of this is dev_ifsioc above.
2566 * dev_ioctl - network device ioctl
2567 * @cmd: command to issue
2568 * @arg: pointer to a struct ifreq in user space
2570 * Issue ioctl functions to devices. This is normally called by the
2571 * user space syscall interfaces but can sometimes be useful for
2572 * other purposes. The return value is handed back to the syscall:
2573 * non-negative on success, or a negative errno code on error.
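/*
 * Example (user-space sketch, not kernel code): the typical route into
 * this entry point, querying the MTU of "eth0":
 *
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu = %d\n", ifr.ifr_mtu);
 */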
2576 int dev_ioctl(unsigned int cmd, void __user *arg)
2582 /* One special case: SIOCGIFCONF takes ifconf argument
2583 and requires shared lock, because it sleeps writing
2584 to user space.
2585 */
2587 if (cmd == SIOCGIFCONF) {
2589 ret = dev_ifconf((char __user *) arg);
2593 if (cmd == SIOCGIFNAME)
2594 return dev_ifname((struct ifreq __user *)arg);
2596 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2599 ifr.ifr_name[IFNAMSIZ-1] = 0;
2601 colon = strchr(ifr.ifr_name, ':');
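/* A ':' in the name (e.g. "eth0:1") denotes an alias: the name is cut
 * at the colon so the device lookup uses the base name, and the colon
 * is put back before the ifreq is copied out to the caller.
 */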
2606 * See which interface the caller is talking about.
2611 * These ioctl calls:
2612 * - can be done by all.
2613 * - are atomic and do not require locking.
2624 dev_load(ifr.ifr_name);
2625 read_lock(&dev_base_lock);
2626 ret = dev_ifsioc(&ifr, cmd);
2627 read_unlock(&dev_base_lock);
2631 if (copy_to_user(arg, &ifr,
2632 sizeof(struct ifreq)))
2638 dev_load(ifr.ifr_name);
2640 ret = dev_ethtool(&ifr);
2645 if (copy_to_user(arg, &ifr,
2646 sizeof(struct ifreq)))
2652 * These ioctl calls:
2653 * - require superuser power.
2654 * - require strict serialization.
2660 if (!capable(CAP_NET_ADMIN))
2662 dev_load(ifr.ifr_name);
2664 ret = dev_ifsioc(&ifr, cmd);
2669 if (copy_to_user(arg, &ifr,
2670 sizeof(struct ifreq)))
2676 * These ioctl calls:
2677 * - require superuser power.
2678 * - require strict serialization.
2679 * - do not return a value
2689 case SIOCSIFHWBROADCAST:
2692 case SIOCBONDENSLAVE:
2693 case SIOCBONDRELEASE:
2694 case SIOCBONDSETHWADDR:
2695 case SIOCBONDSLAVEINFOQUERY:
2696 case SIOCBONDINFOQUERY:
2697 case SIOCBONDCHANGEACTIVE:
2700 if (!capable(CAP_NET_ADMIN))
2702 dev_load(ifr.ifr_name);
2704 ret = dev_ifsioc(&ifr, cmd);
2708 case SIOCGIFMEM:
2709 /* Get the per-device memory space. We can add this but
2710 * currently do not support it */
2711 case SIOCSIFMEM:
2712 /* Set the per-device memory buffer space.
2713 * Not applicable in our case */
2718 * Unknown or private ioctl.
2721 if (cmd == SIOCWANDEV ||
2722 (cmd >= SIOCDEVPRIVATE &&
2723 cmd <= SIOCDEVPRIVATE + 15)) {
2724 dev_load(ifr.ifr_name);
2726 ret = dev_ifsioc(&ifr, cmd);
2728 if (!ret && copy_to_user(arg, &ifr,
2729 sizeof(struct ifreq)))
2734 /* Take care of Wireless Extensions */
2735 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
2736 /* If command is `set a parameter', or
2737 * `get the encoding parameters', check if
2738 * the user has the right to do it */
2739 if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) {
2740 if (!capable(CAP_NET_ADMIN))
2743 dev_load(ifr.ifr_name);
2745 /* Follow me in net/core/wireless.c */
2746 ret = wireless_process_ioctl(&ifr, cmd);
2748 if (!ret && IW_IS_GET(cmd) &&
2749 copy_to_user(arg, &ifr,
2750 sizeof(struct ifreq)))
2754 #endif /* WIRELESS_EXT */
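/*
 * Example (user-space sketch): a wireless extensions request routed
 * through the branch above, assuming WIRELESS_EXT is configured in:
 *
 *	#include <linux/wireless.h>
 *
 *	struct iwreq wrq;
 *
 *	strncpy(wrq.ifr_name, "wlan0", IFNAMSIZ);
 *	ioctl(fd, SIOCGIWNAME, &wrq);
 *
 * SIOCGIWNAME is a get, so no CAP_NET_ADMIN check applies.
 */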
2761 * dev_new_index - allocate an ifindex
2763 * Returns a suitable unique value for a new device interface
2764 * number. The caller must hold the rtnl semaphore or the
2765 * dev_base_lock to be sure it remains unique.
2767 int dev_new_index(void)
2773 if (!__dev_get_by_index(ifindex))
2774 return ifindex;
2778 static int dev_boot_phase = 1;
2780 /* Delayed registration/unregistration */
2781 static spinlock_t net_todo_list_lock = SPIN_LOCK_UNLOCKED;
2782 static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
2784 static inline void net_set_todo(struct net_device *dev)
2786 spin_lock(&net_todo_list_lock);
2787 list_add_tail(&dev->todo_list, &net_todo_list);
2788 spin_unlock(&net_todo_list_lock);
2792 * register_netdevice - register a network device
2793 * @dev: device to register
2795 * Take a completed network device structure and add it to the kernel
2796 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
2797 * chain. 0 is returned on success. A negative errno code is returned
2798 * on a failure to set up the device, or if the name is a duplicate.
2800 * Callers must hold the rtnl semaphore. See the comment at the
2801 * end of Space.c for details about the locking. You may want
2802 * register_netdev() instead of this.
2805 * The locking appears insufficient to guarantee two parallel registers
2806 * will not get the same name.
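/*
 * Example (illustrative sketch): the usual driver probe sequence,
 * assuming a hypothetical mydrv_probe() that set 'dev' up with
 * alloc_etherdev():
 *
 *	rtnl_lock();
 *	err = register_netdevice(dev);
 *	rtnl_unlock();
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *
 * Most drivers simply call register_netdev(dev), which wraps exactly
 * this locking.
 */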
2809 int register_netdevice(struct net_device *dev)
2811 struct hlist_head *head;
2812 struct hlist_node *p;
2815 BUG_ON(dev_boot_phase);
2818 /* When net_devices are persistent, this will be fatal. */
2819 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
2821 spin_lock_init(&dev->queue_lock);
2822 spin_lock_init(&dev->xmit_lock);
2823 dev->xmit_lock_owner = -1;
2824 #ifdef CONFIG_NET_CLS_ACT
2825 spin_lock_init(&dev->ingress_lock);
2828 ret = alloc_divert_blk(dev);
2834 /* Init, if this function is available */
2836 ret = dev->init(dev);
2844 if (!dev_valid_name(dev->name)) {
2849 dev->ifindex = dev_new_index();
2850 if (dev->iflink == -1)
2851 dev->iflink = dev->ifindex;
2853 /* Check for existence of name */
2854 head = dev_name_hash(dev->name);
2855 hlist_for_each(p, head) {
2856 struct net_device *d
2857 = hlist_entry(p, struct net_device, name_hlist);
2858 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
2864 /* Fix illegal SG+CSUM combinations. */
2865 if ((dev->features & NETIF_F_SG) &&
2866 !(dev->features & (NETIF_F_IP_CSUM |
2868 NETIF_F_HW_CSUM))) {
2869 printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
2870 dev->name);
2871 dev->features &= ~NETIF_F_SG;
2875 * nil rebuild_header routine that should never be called;
2876 * it is used only as a bug trap.
2879 if (!dev->rebuild_header)
2880 dev->rebuild_header = default_rebuild_header;
2883 * Default initial state at registration is that the
2884 * device is present.
2887 set_bit(__LINK_STATE_PRESENT, &dev->state);
2890 dev_init_scheduler(dev);
2891 write_lock_bh(&dev_base_lock);
2893 dev_tail = &dev->next;
2894 hlist_add_head(&dev->name_hlist, head);
2895 hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
2897 dev->reg_state = NETREG_REGISTERING;
2898 write_unlock_bh(&dev_base_lock);
2900 /* Notify protocols, that a new device appeared. */
2901 notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
2903 /* Finish registration after unlock */
2910 free_divert_blk(dev);
2915 * netdev_wait_allrefs - wait until all references are gone.
2917 * This is called when unregistering network devices.
2919 * Any protocol or device that holds a reference should register
2920 * for netdevice notification, and clean up and put back the
2921 * reference if they receive an UNREGISTER event.
2922 * We can get stuck here if buggy protocols don't correctly
2923 * call dev_put().
2924 */
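/*
 * Example (illustrative sketch): the notifier pattern expected of
 * reference holders, using hypothetical myproto_* names:
 *
 *	static int myproto_event(struct notifier_block *this,
 *				 unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UNREGISTER && dev == myproto_dev) {
 *			myproto_dev = NULL;
 *			dev_put(dev);	releases our reference
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 * registered via register_netdevice_notifier(&myproto_notifier).
 */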
2925 static void netdev_wait_allrefs(struct net_device *dev)
2927 unsigned long rebroadcast_time, warning_time;
2929 rebroadcast_time = warning_time = jiffies;
2930 while (atomic_read(&dev->refcnt) != 0) {
2931 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
2934 /* Rebroadcast unregister notification */
2935 notifier_call_chain(&netdev_chain,
2936 NETDEV_UNREGISTER, dev);
2938 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
2940 /* We must not have linkwatch events
2941 * pending on unregister. If this
2942 * happens, we simply run the queue
2943 * unscheduled, resulting in a noop
2944 * for this device.
2945 */
2946 linkwatch_run_queue();
2951 rebroadcast_time = jiffies;
2954 set_current_state(TASK_INTERRUPTIBLE);
2955 schedule_timeout(HZ / 4);
2957 if (time_after(jiffies, warning_time + 10 * HZ)) {
2958 printk(KERN_EMERG "unregister_netdevice: "
2959 "waiting for %s to become free. Usage "
2960 "count = %d\n",
2961 dev->name, atomic_read(&dev->refcnt));
2962 warning_time = jiffies;
2971 * register_netdevice(x1);
2972 * register_netdevice(x2);
2974 * unregister_netdevice(y1);
2975 * unregister_netdevice(y2);
2981 * We are invoked by rtnl_unlock() after it drops the semaphore.
2982 * This allows us to deal with problems:
2983 * 1) We can create/delete sysfs objects which invoke hotplug
2984 * without deadlocking with linkwatch via keventd.
2985 * 2) Since we run with the RTNL semaphore not held, we can sleep
2986 * safely in order to wait for the netdev refcnt to drop to zero.
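/*
 * Producer/consumer sketch: register_netdevice() and
 * unregister_netdevice() queue deferred work with net_set_todo()
 * while holding the rtnl semaphore; rtnl_unlock() then drops the
 * semaphore and calls netdev_run_todo() below to consume the queue.
 */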
2988 static DECLARE_MUTEX(net_todo_run_mutex);
2989 void netdev_run_todo(void)
2991 struct list_head list = LIST_HEAD_INIT(list);
2995 /* Need to guard against multiple cpu's getting out of order. */
2996 down(&net_todo_run_mutex);
2998 /* Not safe to do outside the semaphore. We must not return
2999 * until all unregister events invoked by the local processor
3000 * have been completed (either by this todo run, or one on
3001 * another cpu).
3002 */
3003 if (list_empty(&net_todo_list))
3006 /* Snapshot list, allow later requests */
3007 spin_lock(&net_todo_list_lock);
3008 list_splice_init(&net_todo_list, &list);
3009 spin_unlock(&net_todo_list_lock);
3011 while (!list_empty(&list)) {
3012 struct net_device *dev
3013 = list_entry(list.next, struct net_device, todo_list);
3014 list_del(&dev->todo_list);
3016 switch(dev->reg_state) {
3017 case NETREG_REGISTERING:
3018 err = netdev_register_sysfs(dev);
3020 printk(KERN_ERR "%s: failed sysfs registration (%d)\n",
3021 dev->name, err);
3022 dev->reg_state = NETREG_REGISTERED;
3025 case NETREG_UNREGISTERING:
3026 netdev_unregister_sysfs(dev);
3027 dev->reg_state = NETREG_UNREGISTERED;
3029 netdev_wait_allrefs(dev);
3032 BUG_ON(atomic_read(&dev->refcnt));
3033 BUG_TRAP(!dev->ip_ptr);
3034 BUG_TRAP(!dev->ip6_ptr);
3035 BUG_TRAP(!dev->dn_ptr);
3038 /* It must be the very last action,
3039 * after this 'dev' may point to freed up memory.
3041 if (dev->destructor)
3042 dev->destructor(dev);
3046 printk(KERN_ERR "network todo '%s' but state %d\n",
3047 dev->name, dev->reg_state);
3053 up(&net_todo_run_mutex);
3057 * free_netdev - free network device
3060 * This function does the last stage of destroying an allocated device
3061 * interface. The reference to the device object is released.
3062 * If this is the last reference then it will be freed.
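/*
 * Example (illustrative sketch): the driver error path that the
 * compatibility case below exists for, in a hypothetical probe
 * routine:
 *
 *	dev = alloc_etherdev(sizeof(struct mydrv_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);	never registered: freed at once
 *		return err;
 *	}
 */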
3064 void free_netdev(struct net_device *dev)
3067 /* Compatibility with error handling in drivers */
3068 if (dev->reg_state == NETREG_UNINITIALIZED) {
3069 kfree((char *)dev - dev->padded);
3070 return;
3071 }
3073 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
3074 dev->reg_state = NETREG_RELEASED;
3076 /* will free via class release */
3077 class_device_put(&dev->class_dev);
3079 kfree((char *)dev - dev->padded);
3083 /* Synchronize with packet receive processing. */
3084 void synchronize_net(void)
3087 synchronize_kernel();
3091 * unregister_netdevice - remove device from the kernel
3094 * This function shuts down a device interface and removes it
3095 * from the kernel tables. On success 0 is returned, on a failure
3096 * a negative errno code is returned.
3098 * Callers must hold the rtnl semaphore. See the comment at the
3099 * end of Space.c for details about the locking. You may want
3100 * unregister_netdev() instead of this.
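/*
 * Example (illustrative sketch): module unload for a hypothetical
 * driver; unregister_netdev() wraps this function in rtnl locking:
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 *
 * free_netdev() is safe here because rtnl_unlock() has already run
 * netdev_run_todo(), which waits for all remaining references.
 */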
3103 int unregister_netdevice(struct net_device *dev)
3105 struct net_device *d, **dp;
3107 BUG_ON(dev_boot_phase);
3110 /* Some devices call without registering for initialization unwind. */
3111 if (dev->reg_state == NETREG_UNINITIALIZED) {
3112 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3113 "was registered\n", dev->name, dev);
3117 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3119 /* If device is running, close it first. */
3120 if (dev->flags & IFF_UP)
3123 /* And unlink it from device chain. */
3124 for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
3126 write_lock_bh(&dev_base_lock);
3127 hlist_del(&dev->name_hlist);
3128 hlist_del(&dev->index_hlist);
3129 if (dev_tail == &dev->next)
3132 write_unlock_bh(&dev_base_lock);
3137 printk(KERN_ERR "unregister net_device: '%s' not found\n",
3138 dev->name);
3142 dev->reg_state = NETREG_UNREGISTERING;
3146 /* Shutdown queueing discipline. */
3150 /* Notify protocols that we are about to destroy
3151 this device. They should clean up all of their state.
3152 */
3153 notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
3156 * Flush the multicast chain
3158 dev_mc_discard(dev);
3163 /* Notifier chain MUST detach us from master device. */
3164 BUG_TRAP(!dev->master);
3166 free_divert_blk(dev);
3168 /* Finish processing unregister after unlock */
3177 #ifdef CONFIG_HOTPLUG_CPU
3178 static int dev_cpu_callback(struct notifier_block *nfb,
3179 unsigned long action,
3182 struct sk_buff **list_skb;
3183 struct net_device **list_net;
3184 struct sk_buff *skb;
3185 unsigned int cpu, oldcpu = (unsigned long)ocpu;
3186 struct softnet_data *sd, *oldsd;
3188 if (action != CPU_DEAD)
3191 local_irq_disable();
3192 cpu = smp_processor_id();
3193 sd = &per_cpu(softnet_data, cpu);
3194 oldsd = &per_cpu(softnet_data, oldcpu);
3196 /* Find end of our completion_queue. */
3197 list_skb = &sd->completion_queue;
3198 while (*list_skb)
3199 list_skb = &(*list_skb)->next;
3200 /* Append completion queue from offline CPU. */
3201 *list_skb = oldsd->completion_queue;
3202 oldsd->completion_queue = NULL;
3204 /* Find end of our output_queue. */
3205 list_net = &sd->output_queue;
3206 while (*list_net)
3207 list_net = &(*list_net)->next_sched;
3208 /* Append output queue from offline CPU. */
3209 *list_net = oldsd->output_queue;
3210 oldsd->output_queue = NULL;
3212 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3215 /* Process offline CPU's input_pkt_queue */
3216 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
3217 netif_rx(skb);
3221 #endif /* CONFIG_HOTPLUG_CPU */
3225 * Initialize the DEV module. At boot time this walks the device list and
3226 * unhooks any devices that fail to initialise (normally hardware not
3227 * present) and leaves us with a valid list of present and active devices.
3232 * This is called single-threaded during boot, so no need
3233 * to take the rtnl semaphore.
3235 static int __init net_dev_init(void)
3237 int i, rc = -ENOMEM;
3239 BUG_ON(!dev_boot_phase);
3243 if (dev_proc_init())
3246 if (netdev_sysfs_init())
3249 INIT_LIST_HEAD(&ptype_all);
3250 for (i = 0; i < 16; i++)
3251 INIT_LIST_HEAD(&ptype_base[i]);
3253 for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
3254 INIT_HLIST_HEAD(&dev_name_head[i]);
3256 for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
3257 INIT_HLIST_HEAD(&dev_index_head[i]);
3260 * Initialise the packet receive queues.
3263 for (i = 0; i < NR_CPUS; i++) {
3264 struct softnet_data *queue;
3266 queue = &per_cpu(softnet_data, i);
3267 skb_queue_head_init(&queue->input_pkt_queue);
3268 queue->throttle = 0;
3269 queue->cng_level = 0;
3270 queue->avg_blog = 10; /* arbitrary non-zero */
3271 queue->completion_queue = NULL;
3272 INIT_LIST_HEAD(&queue->poll_list);
3273 set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
3274 queue->backlog_dev.weight = weight_p;
3275 queue->backlog_dev.poll = process_backlog;
3276 atomic_set(&queue->backlog_dev.refcnt, 1);
3279 #ifdef OFFLINE_SAMPLE
3280 samp_timer.expires = jiffies + (10 * HZ);
3281 add_timer(&samp_timer);
3286 open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
3287 open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
3289 hotcpu_notifier(dev_cpu_callback, 0);
3297 subsys_initcall(net_dev_init);
3299 EXPORT_SYMBOL(__dev_get);
3300 EXPORT_SYMBOL(__dev_get_by_flags);
3301 EXPORT_SYMBOL(__dev_get_by_index);
3302 EXPORT_SYMBOL(__dev_get_by_name);
3303 EXPORT_SYMBOL(__dev_remove_pack);
3304 EXPORT_SYMBOL(__skb_linearize);
3305 EXPORT_SYMBOL(call_netdevice_notifiers);
3306 EXPORT_SYMBOL(dev_add_pack);
3307 EXPORT_SYMBOL(dev_alloc_name);
3308 EXPORT_SYMBOL(dev_close);
3309 EXPORT_SYMBOL(dev_get_by_flags);
3310 EXPORT_SYMBOL(dev_get_by_index);
3311 EXPORT_SYMBOL(dev_get_by_name);
3312 EXPORT_SYMBOL(dev_getbyhwaddr);
3313 EXPORT_SYMBOL(dev_ioctl);
3314 EXPORT_SYMBOL(dev_new_index);
3315 EXPORT_SYMBOL(dev_open);
3316 EXPORT_SYMBOL(dev_queue_xmit);
3317 EXPORT_SYMBOL(dev_queue_xmit_nit);
3318 EXPORT_SYMBOL(dev_remove_pack);
3319 EXPORT_SYMBOL(dev_set_allmulti);
3320 EXPORT_SYMBOL(dev_set_promiscuity);
3321 EXPORT_SYMBOL(dev_change_flags);
3322 EXPORT_SYMBOL(dev_change_name);
3323 EXPORT_SYMBOL(dev_set_mtu);
3324 EXPORT_SYMBOL(free_netdev);
3325 EXPORT_SYMBOL(netdev_boot_setup_check);
3326 EXPORT_SYMBOL(netdev_set_master);
3327 EXPORT_SYMBOL(netdev_state_change);
3328 EXPORT_SYMBOL(netif_receive_skb);
3329 EXPORT_SYMBOL(netif_rx);
3330 EXPORT_SYMBOL(register_gifconf);
3331 EXPORT_SYMBOL(register_netdevice);
3332 EXPORT_SYMBOL(register_netdevice_notifier);
3333 EXPORT_SYMBOL(skb_checksum_help);
3334 EXPORT_SYMBOL(synchronize_net);
3335 EXPORT_SYMBOL(unregister_netdevice);
3336 EXPORT_SYMBOL(unregister_netdevice_notifier);
3338 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
3339 EXPORT_SYMBOL(br_handle_frame_hook);
3343 EXPORT_SYMBOL(dev_load);
3345 #ifdef CONFIG_NET_HW_FLOWCONTROL
3346 EXPORT_SYMBOL(netdev_dropping);
3347 EXPORT_SYMBOL(netdev_fc_xoff);
3348 EXPORT_SYMBOL(netdev_register_fc);
3349 EXPORT_SYMBOL(netdev_unregister_fc);
3352 #ifdef CONFIG_NET_CLS_ACT
3353 EXPORT_SYMBOL(ing_filter);
3357 EXPORT_PER_CPU_SYMBOL(softnet_data);