2 * NET3 Protocol independent device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the non-IP parts of dev.c 1.0.19
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
51 * Rudi Cilibrasi : Pass the right thing to
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
75 #include <asm/uaccess.h>
76 #include <asm/system.h>
77 #include <asm/bitops.h>
78 #include <linux/config.h>
79 #include <linux/cpu.h>
80 #include <linux/types.h>
81 #include <linux/kernel.h>
82 #include <linux/sched.h>
83 #include <linux/string.h>
85 #include <linux/socket.h>
86 #include <linux/sockios.h>
87 #include <linux/errno.h>
88 #include <linux/interrupt.h>
89 #include <linux/if_ether.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/notifier.h>
93 #include <linux/skbuff.h>
95 #include <linux/rtnetlink.h>
96 #include <linux/proc_fs.h>
97 #include <linux/seq_file.h>
98 #include <linux/stat.h>
99 #include <linux/if_bridge.h>
100 #include <linux/divert.h>
102 #include <net/pkt_sched.h>
103 #include <net/checksum.h>
104 #include <linux/highmem.h>
105 #include <linux/init.h>
106 #include <linux/kmod.h>
107 #include <linux/module.h>
108 #include <linux/kallsyms.h>
109 #include <linux/netpoll.h>
110 #ifdef CONFIG_NET_RADIO
111 #include <linux/wireless.h> /* Note : will define WIRELESS_EXT */
112 #include <net/iw_handler.h>
113 #endif /* CONFIG_NET_RADIO */
114 #include <asm/current.h>
116 /* This define, if set, will randomly drop a packet when congestion
117 * is more than moderate. It helps fairness in the multi-interface
118 * case when one of them is a hog, but it kills performance for the
119 * single interface case so it is off now by default.
123 /* Setting this will sample the queue lengths and thus congestion
124 * via a timer instead of as each packet is received.
126 #undef OFFLINE_SAMPLE
129 * The list of packet types we will receive (as opposed to discard)
130 * and the routines to invoke.
132 * Why 16? Because with 16 the only overlap we get on a hash of the
133 * low nibble of the protocol value is RARP/SNAP/X.25.
135 * NOTE: That is no longer true with the addition of VLAN tags. Not
136 * sure which should go first, but I bet it won't make much
137 * difference if we are running VLANs. The good news is that
138 * this protocol won't be in the list unless compiled in, so
139 * the average user (without VLANs) will not be adversely affected.
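/*
 * Worked example: ETH_P_X25 (0x0805), ETH_P_RARP (0x8035) and the
 * internal ETH_P_SNAP value (0x0005) all have low nibble 5, so
 * ntohs(type) & 15 puts all three in the same ptype_base[] bucket --
 * the single collision mentioned above.
 */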
156 static spinlock_t ptype_lock = SPIN_LOCK_UNLOCKED;
157 static struct list_head ptype_base[16]; /* 16 way hashed list */
158 static struct list_head ptype_all; /* Taps */
160 #ifdef OFFLINE_SAMPLE
161 static void sample_queue(unsigned long dummy);
162 static struct timer_list samp_timer = TIMER_INITIALIZER(sample_queue, 0, 0);
166 * The @dev_base list is protected by @dev_base_lock and the rtnl semaphore.
169 * Pure readers hold dev_base_lock for reading.
171 * Writers must hold the rtnl semaphore while they loop through the
172 * dev_base list, and hold dev_base_lock for writing when they do the
173 * actual updates. This allows pure readers to access the list even
174 * while a writer is preparing to update it.
176 * To put it another way, dev_base_lock is held for writing only to
177 * protect against pure readers; the rtnl semaphore provides the
178 * protection against other writers.
180 * See, for example usages, register_netdevice() and
181 * unregister_netdevice(), which must be called with the rtnl semaphore held.
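/*
 * A pure-reader walk of the list therefore looks like this (sketch;
 * the loop body is a placeholder and must neither sleep nor modify
 * the list):
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for (dev = dev_base; dev; dev = dev->next)
 *		use_dev_readonly(dev);
 *	read_unlock(&dev_base_lock);
 */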
184 struct net_device *dev_base;
185 struct net_device **dev_tail = &dev_base;
186 rwlock_t dev_base_lock = RW_LOCK_UNLOCKED;
188 EXPORT_SYMBOL(dev_base);
189 EXPORT_SYMBOL(dev_base_lock);
191 #define NETDEV_HASHBITS 8
192 static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
193 static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];
195 static inline struct hlist_head *dev_name_hash(const char *name)
197 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
198 return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
201 static inline struct hlist_head *dev_index_hash(int ifindex)
203 return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
210 static struct notifier_block *netdev_chain;
213 * Device drivers call our routines to queue packets here. We empty the
214 * queue in the local softnet handler.
216 DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, };
218 #ifdef CONFIG_NET_FASTROUTE
219 int netdev_fastroute;
220 int netdev_fastroute_obstacles;
224 extern int netdev_sysfs_init(void);
225 extern int netdev_register_sysfs(struct net_device *);
226 extern void netdev_unregister_sysfs(struct net_device *);
228 #define netdev_sysfs_init() (0)
229 #define netdev_register_sysfs(dev) (0)
230 #define netdev_unregister_sysfs(dev) do { } while(0)
234 /*******************************************************************************
236 Protocol management and registration routines
238 *******************************************************************************/
247 * Add a protocol ID to the list. Now that the input handler is
248 * smarter we can dispense with all the messy stuff that used to be here.
251 * BEWARE!!! Protocol handlers that mangle input packets
252 * MUST BE last in the hash buckets, and checking of protocol handlers
253 * MUST start from the promiscuous ptype_all chain in net_bh.
254 * This is true today; do not change it.
255 * Explanation: if a handler that mangles packets were
256 * first on the list, it could not sense that the packet
257 * is cloned and should be copied-on-write, so it would
258 * change the clone in place and subsequent readers would get a broken packet.
263 * dev_add_pack - add packet handler
264 * @pt: packet type declaration
266 * Add a protocol handler to the networking stack. The passed &packet_type
267 * is linked into kernel lists and may not be freed until it has been
268 * removed from the kernel lists.
270 * This call does not sleep, therefore it cannot
271 * guarantee that all CPUs currently in the middle of receiving packets
272 * will see the new packet type (until the next received packet).
275 void dev_add_pack(struct packet_type *pt)
279 spin_lock_bh(&ptype_lock);
280 #ifdef CONFIG_NET_FASTROUTE
281 if (pt->af_packet_priv) {
282 netdev_fastroute_obstacles++;
283 dev_clear_fastroute(pt->dev);
286 if (pt->type == htons(ETH_P_ALL)) {
288 list_add_rcu(&pt->list, &ptype_all);
290 hash = ntohs(pt->type) & 15;
291 list_add_rcu(&pt->list, &ptype_base[hash]);
293 spin_unlock_bh(&ptype_lock);
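/*
 * Usage sketch (the my_* names are hypothetical): a tap that sees
 * every received frame and then drops its reference.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt)
 *	{
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_ptype = {
 *		.type	= __constant_htons(ETH_P_ALL),
 *		.func	= my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);
 *	...
 *	dev_remove_pack(&my_ptype);	(typically from module exit)
 */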
296 extern void linkwatch_run_queue(void);
301 * __dev_remove_pack - remove packet handler
302 * @pt: packet type declaration
304 * Remove a protocol handler that was previously added to the kernel
305 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
306 * from the kernel lists and can be freed or reused once this function returns.
309 * The packet type might still be in use by receivers
310 * and must not be freed until after all the CPUs have gone
311 * through a quiescent state.
313 void __dev_remove_pack(struct packet_type *pt)
315 struct list_head *head;
316 struct packet_type *pt1;
318 spin_lock_bh(&ptype_lock);
320 if (pt->type == htons(ETH_P_ALL)) {
324 head = &ptype_base[ntohs(pt->type) & 15];
326 list_for_each_entry(pt1, head, list) {
328 #ifdef CONFIG_NET_FASTROUTE
329 if (pt->af_packet_priv)
330 netdev_fastroute_obstacles--;
332 list_del_rcu(&pt->list);
337 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
339 spin_unlock_bh(&ptype_lock);
342 * dev_remove_pack - remove packet handler
343 * @pt: packet type declaration
345 * Remove a protocol handler that was previously added to the kernel
346 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
347 * from the kernel lists and can be freed or reused once this function returns.
350 * This call sleeps to guarantee that no CPU is looking at the packet type after return.
353 void dev_remove_pack(struct packet_type *pt)
355 __dev_remove_pack(pt);
360 /******************************************************************************
362 Device Boot-time Settings Routines
364 *******************************************************************************/
366 /* Boot time configuration table */
367 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
370 * netdev_boot_setup_add - add new setup entry
371 * @name: name of the device
372 * @map: configured settings for the device
374 * Adds new setup entry to the dev_boot_setup list. The function
375 * returns 0 on error and 1 on success. This is a generic routine for all netdevices.
378 int netdev_boot_setup_add(char *name, struct ifmap *map)
380 struct netdev_boot_setup *s;
384 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
385 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
386 memset(s[i].name, 0, sizeof(s[i].name));
387 strcpy(s[i].name, name);
388 memcpy(&s[i].map, map, sizeof(s[i].map));
393 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
397 * netdev_boot_setup_check - check boot time settings
398 * @dev: the netdevice
400 * Check boot time settings for the device.
401 * The found settings are set for the device to be used
402 * later in the device probing.
403 * Returns 0 if no settings are found, 1 if they are.
405 int netdev_boot_setup_check(struct net_device *dev)
407 struct netdev_boot_setup *s = dev_boot_setup;
410 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
411 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
412 !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
413 dev->irq = s[i].map.irq;
414 dev->base_addr = s[i].map.base_addr;
415 dev->mem_start = s[i].map.mem_start;
416 dev->mem_end = s[i].map.mem_end;
425 * netdev_boot_base - get address from boot time settings
426 * @prefix: prefix for network device
427 * @unit: id for network device
429 * Check boot time settings for the base address of device.
430 * The found settings are set for the device to be used
431 * later in the device probing.
432 * Returns 0 if no settings found.
434 unsigned long netdev_boot_base(const char *prefix, int unit)
436 const struct netdev_boot_setup *s = dev_boot_setup;
440 sprintf(name, "%s%d", prefix, unit);
443 * If device already registered then return base of 1
444 * to indicate not to probe for this interface
446 if (__dev_get_by_name(name))
449 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
450 if (!strcmp(name, s[i].name))
451 return s[i].map.base_addr;
456 * Saves the settings configured at boot time for any netdevice.
458 int __init netdev_boot_setup(char *str)
463 str = get_options(str, ARRAY_SIZE(ints), ints);
468 memset(&map, 0, sizeof(map));
472 map.base_addr = ints[2];
474 map.mem_start = ints[3];
476 map.mem_end = ints[4];
478 /* Add new entry to the list */
479 return netdev_boot_setup_add(str, &map);
482 __setup("netdev=", netdev_boot_setup);
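/*
 * Example (a sketch; the field order irq,base_addr,mem_start,mem_end
 * is inferred from the parsing above): a kernel command line of
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * would request IRQ 9 and I/O base 0x300 for eth0, leaving the memory
 * range unset.
 */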
484 /*******************************************************************************
486 Device Interface Subroutines
488 *******************************************************************************/
491 * __dev_get_by_name - find a device by its name
492 * @name: name to find
494 * Find an interface by name. Must be called under RTNL semaphore
495 * or @dev_base_lock. If the name is found a pointer to the device
496 * is returned. If the name is not found then %NULL is returned. The
497 * reference counters are not incremented so the caller must be
498 * careful with locks.
501 struct net_device *__dev_get_by_name(const char *name)
503 struct hlist_node *p;
505 hlist_for_each(p, dev_name_hash(name)) {
506 struct net_device *dev
507 = hlist_entry(p, struct net_device, name_hlist);
508 if (!strncmp(dev->name, name, IFNAMSIZ))
515 * dev_get_by_name - find a device by its name
516 * @name: name to find
518 * Find an interface by name. This can be called from any
519 * context and does its own locking. The returned handle has
520 * the usage count incremented and the caller must use dev_put() to
521 * release it when it is no longer needed. %NULL is returned if no
522 * matching device is found.
525 struct net_device *dev_get_by_name(const char *name)
527 struct net_device *dev;
529 read_lock(&dev_base_lock);
530 dev = __dev_get_by_name(name);
533 read_unlock(&dev_base_lock);
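/*
 * Usage sketch:
 *
 *	struct net_device *dev = dev_get_by_name("eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */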
538 The return value was changed to int to prevent illegal usage in the future.
539 It is still legal to use it to check for device existence.
541 The user should understand that the result returned by this function
542 is meaningless unless the call was issued under the rtnl semaphore.
546 * dev_get - test if a device exists
547 * @name: name to test for
549 * Test if a name exists. Returns true if the name is found. In order
550 * to be sure the name is not allocated or removed during the test the
551 * caller must hold the rtnl semaphore.
553 * This function exists only for backward compatibility with older
556 int __dev_get(const char *name)
558 struct net_device *dev;
560 read_lock(&dev_base_lock);
561 dev = __dev_get_by_name(name);
562 read_unlock(&dev_base_lock);
567 * __dev_get_by_index - find a device by its ifindex
568 * @ifindex: index of device
570 * Search for an interface by index. Returns a pointer to the device,
571 * or %NULL if the device is not found. The device has not
572 * had its reference counter increased, so the caller must be careful
573 * about locking. The caller must hold either the RTNL semaphore or @dev_base_lock.
577 struct net_device *__dev_get_by_index(int ifindex)
579 struct hlist_node *p;
581 hlist_for_each(p, dev_index_hash(ifindex)) {
582 struct net_device *dev
583 = hlist_entry(p, struct net_device, index_hlist);
584 if (dev->ifindex == ifindex)
592 * dev_get_by_index - find a device by its ifindex
593 * @ifindex: index of device
595 * Search for an interface by index. Returns a pointer to the device,
596 * or NULL if the device is not found. The device returned has
597 * had a reference added and the pointer is safe until the user calls
598 * dev_put to indicate they have finished with it.
601 struct net_device *dev_get_by_index(int ifindex)
603 struct net_device *dev;
605 read_lock(&dev_base_lock);
606 dev = __dev_get_by_index(ifindex);
609 read_unlock(&dev_base_lock);
614 * dev_getbyhwaddr - find a device by its hardware address
615 * @type: media type of device
616 * @ha: hardware address
618 * Search for an interface by MAC address. Returns a pointer to the
619 * device, or NULL if the device is not found. The caller must hold the
620 * rtnl semaphore. The returned device has not had its ref count increased
621 * and the caller must therefore be careful about locking.
624 * If the API was consistent this would be __dev_get_by_hwaddr
627 struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
629 struct net_device *dev;
633 for (dev = dev_base; dev; dev = dev->next)
634 if (dev->type == type &&
635 !memcmp(dev->dev_addr, ha, dev->addr_len))
640 struct net_device *__dev_getfirstbyhwtype(unsigned short type)
642 struct net_device *dev;
644 for (dev = dev_base; dev; dev = dev->next)
645 if (dev->type == type)
650 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
652 struct net_device *dev_getfirstbyhwtype(unsigned short type)
654 struct net_device *dev;
657 dev = __dev_getfirstbyhwtype(type);
664 EXPORT_SYMBOL(dev_getfirstbyhwtype);
667 * dev_get_by_flags - find any device with given flags
668 * @if_flags: IFF_* values
669 * @mask: bitmask of bits in if_flags to check
671 * Search for any interface with the given flags. Returns a pointer to
672 * the device, or NULL if no device is found. The device returned has
673 * had a reference added and the pointer is safe until the user calls
674 * dev_put to indicate they have finished with it.
677 struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mask)
679 struct net_device *dev;
681 read_lock(&dev_base_lock);
682 dev = __dev_get_by_flags(if_flags, mask);
685 read_unlock(&dev_base_lock);
690 * __dev_get_by_flags - find any device with given flags
691 * @if_flags: IFF_* values
692 * @mask: bitmask of bits in if_flags to check
694 * Search for any interface with the given flags. Returns a pointer to
695 * the device, or NULL if no device is found. The caller must hold either
696 * the RTNL semaphore or @dev_base_lock.
699 struct net_device *__dev_get_by_flags(unsigned short if_flags, unsigned short mask)
701 struct net_device *dev;
703 for (dev = dev_base; dev != NULL; dev = dev->next) {
704 if (((dev->flags ^ if_flags) & mask) == 0)
711 * dev_valid_name - check if name is okay for network device
714 * Network device names need to be valid file names to
715 * allow sysfs to work.
717 int dev_valid_name(const char *name)
719 return !(*name == '\0'
720 || !strcmp(name, ".")
721 || !strcmp(name, "..")
722 || strchr(name, '/'));
726 * dev_alloc_name - allocate a name for a device
728 * @name: name format string
730 * Passed a format string, e.g. "lt%d", it will try to find a suitable
731 * id. Not efficient for many devices, not called a lot. The caller
732 * must hold the dev_base or rtnl lock while allocating the name and
733 * adding the device in order to avoid duplicates. Returns the number
734 * of the unit assigned or a negative errno code.
737 int dev_alloc_name(struct net_device *dev, const char *name)
742 const int max_netdevices = 8*PAGE_SIZE;
744 struct net_device *d;
746 p = strnchr(name, IFNAMSIZ-1, '%');
749 * Verify the string as this thing may have come from
750 * the user. There must be exactly one "%d" and no other "%" characters.
753 if (p[1] != 'd' || strchr(p + 2, '%'))
756 /* Use one page as a bit array of possible slots */
757 inuse = (long *) get_zeroed_page(GFP_ATOMIC);
761 for (d = dev_base; d; d = d->next) {
762 if (!sscanf(d->name, name, &i))
764 if (i < 0 || i >= max_netdevices)
767 /* avoid cases where sscanf is not exact inverse of printf */
768 snprintf(buf, sizeof(buf), name, i);
769 if (!strncmp(buf, d->name, IFNAMSIZ))
773 i = find_first_zero_bit(inuse, max_netdevices);
774 free_page((unsigned long) inuse);
777 snprintf(buf, sizeof(buf), name, i);
778 if (!__dev_get_by_name(buf)) {
779 strlcpy(dev->name, buf, IFNAMSIZ);
783 /* It is possible to run out of possible slots
784 * when the name is long and there isn't enough space left
785 * for the digits, or if all bits are used.
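/*
 * Usage sketch: ask for the first free "dummy%d" unit.
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err >= 0)
 *		printk(KERN_DEBUG "assigned unit %d as %s\n",
 *		       err, dev->name);
 */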
792 * dev_change_name - change name of a device
794 * @newname: name (or format string) must be at least IFNAMSIZ
796 * Change the name of a device; a format string such as "eth%d" may be passed.
799 int dev_change_name(struct net_device *dev, char *newname)
805 if (dev->flags & IFF_UP)
808 if (!dev_valid_name(newname))
811 if (strchr(newname, '%')) {
812 err = dev_alloc_name(dev, newname);
815 strcpy(newname, dev->name);
817 else if (__dev_get_by_name(newname))
820 strlcpy(dev->name, newname, IFNAMSIZ);
822 err = class_device_rename(&dev->class_dev, dev->name);
824 hlist_del(&dev->name_hlist);
825 hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
826 notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
833 * netdev_state_change - device changes state
834 * @dev: device to cause notification
836 * Called to indicate a device has changed state. This function calls
837 * the notifier chains for netdev_chain and sends a NEWLINK message
838 * to the routing socket.
840 void netdev_state_change(struct net_device *dev)
842 if (dev->flags & IFF_UP) {
843 notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
844 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
849 * dev_load - load a network module
850 * @name: name of interface
852 * If a network interface is not present and the process has suitable
853 * privileges this function loads the module. If module loading is not
854 * available in this kernel then it becomes a nop.
857 void dev_load(const char *name)
859 struct net_device *dev;
861 read_lock(&dev_base_lock);
862 dev = __dev_get_by_name(name);
863 read_unlock(&dev_base_lock);
865 if (!dev && capable(CAP_SYS_MODULE))
866 request_module("%s", name);
869 static int default_rebuild_header(struct sk_buff *skb)
871 printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
872 skb->dev ? skb->dev->name : "NULL!!!");
879 * Some old buggy device drivers change get_stats after registering
880 * the device. Try to trap them here.
881 * This can be eliminated once all drivers are known to be fixed.
883 static inline int get_stats_changed(struct net_device *dev)
885 int changed = dev->last_stats != dev->get_stats;
886 dev->last_stats = dev->get_stats;
891 * dev_open - prepare an interface for use.
892 * @dev: device to open
894 * Takes a device from down to up state. The device's private open
895 * function is invoked and then the multicast lists are loaded. Finally
896 * the device is moved into the up state and a %NETDEV_UP message is
897 * sent to the netdev notifier chain.
899 * Calling this function on an active interface is a nop. On a failure
900 * a negative errno code is returned.
902 int dev_open(struct net_device *dev)
910 if (dev->flags & IFF_UP)
914 * Check for broken device drivers.
916 if (get_stats_changed(dev) && net_ratelimit()) {
917 printk(KERN_ERR "%s: driver changed get_stats after register\n",
922 * Is it even present?
924 if (!netif_device_present(dev))
928 * Call device private open method
930 set_bit(__LINK_STATE_START, &dev->state);
932 ret = dev->open(dev);
934 clear_bit(__LINK_STATE_START, &dev->state);
938 * Check for more broken device drivers.
940 if (get_stats_changed(dev) && net_ratelimit()) {
941 printk(KERN_ERR "%s: driver changed get_stats in open\n",
946 * If it went open OK then:
953 dev->flags |= IFF_UP;
956 * Initialize multicasting status
961 * Wakeup transmit queue engine
966 * ... and announce new interface.
968 notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
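/*
 * Usage sketch: bringing a device up from process context, typically
 * under the rtnl semaphore.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */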
973 #ifdef CONFIG_NET_FASTROUTE
975 static void dev_do_clear_fastroute(struct net_device *dev)
977 if (dev->accept_fastpath) {
980 for (i = 0; i <= NETDEV_FASTROUTE_HMASK; i++) {
981 struct dst_entry *dst;
983 write_lock_irq(&dev->fastpath_lock);
984 dst = dev->fastpath[i];
985 dev->fastpath[i] = NULL;
986 write_unlock_irq(&dev->fastpath_lock);
993 void dev_clear_fastroute(struct net_device *dev)
996 dev_do_clear_fastroute(dev);
998 read_lock(&dev_base_lock);
999 for (dev = dev_base; dev; dev = dev->next)
1000 dev_do_clear_fastroute(dev);
1001 read_unlock(&dev_base_lock);
1007 * dev_close - shutdown an interface.
1008 * @dev: device to shutdown
1010 * This function moves an active device into down state. A
1011 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1012 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1015 int dev_close(struct net_device *dev)
1017 if (!(dev->flags & IFF_UP))
1021 * Tell people we are going down, so that they can
1022 * prepare for death while the device is still operating.
1024 notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
1026 dev_deactivate(dev);
1028 clear_bit(__LINK_STATE_START, &dev->state);
1030 /* Synchronize with any scheduled poll. We cannot touch the poll list;
1031 * it may even be on a different cpu. So just clear netif_running()
1032 * and wait until the poll really happens. Actually, the best place
1033 * for this is inside dev->stop() after the device has stopped its irq
1034 * engine, but this requires more changes in drivers. */
1036 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1037 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
1039 current->state = TASK_INTERRUPTIBLE;
1040 schedule_timeout(1);
1044 * Call the device specific close. This cannot fail.
1045 * Only if device is UP
1047 * We allow it to be called even after a DETACH hot-plug event.
1054 * Device is now down.
1057 dev->flags &= ~IFF_UP;
1058 #ifdef CONFIG_NET_FASTROUTE
1059 dev_clear_fastroute(dev);
1063 * Tell people we are down
1065 notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
1072 * Device change register/unregister. These are not inline or static
1073 * as we export them to the world.
1077 * register_netdevice_notifier - register a network notifier block
1080 * Register a notifier to be called when network device events occur.
1081 * The notifier passed is linked into the kernel structures and must
1082 * not be reused until it has been unregistered. A negative errno code
1083 * is returned on a failure.
1085 * When registered, all registration and up events are replayed
1086 * to the new notifier to allow it to have a race-free
1087 * view of the network device list.
1090 int register_netdevice_notifier(struct notifier_block *nb)
1092 struct net_device *dev;
1096 err = notifier_chain_register(&netdev_chain, nb);
1098 for (dev = dev_base; dev; dev = dev->next) {
1099 nb->notifier_call(nb, NETDEV_REGISTER, dev);
1101 if (dev->flags & IFF_UP)
1102 nb->notifier_call(nb, NETDEV_UP, dev);
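/*
 * Usage sketch (the my_* names are hypothetical): a notifier that logs
 * interfaces coming up. Thanks to the replay described above, devices
 * that were already up are reported as well.
 *
 *	static int my_event(struct notifier_block *nb,
 *			    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 */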
1110 * unregister_netdevice_notifier - unregister a network notifier block
1113 * Unregister a notifier previously registered by
1114 * register_netdevice_notifier(). The notifier is unlinked from the
1115 * kernel structures and may then be reused. A negative errno code
1116 * is returned on a failure.
1119 int unregister_netdevice_notifier(struct notifier_block *nb)
1121 return notifier_chain_unregister(&netdev_chain, nb);
1125 * call_netdevice_notifiers - call all network notifier blocks
1126 * @val: value passed unmodified to notifier function
1127 * @v: pointer passed unmodified to notifier function
1129 * Call all network notifier blocks. Parameters and return value
1130 * are as for notifier_call_chain().
1133 int call_netdevice_notifiers(unsigned long val, void *v)
1135 return notifier_call_chain(&netdev_chain, val, v);
1139 * Support routine. Sends outgoing frames to any network
1140 * taps currently in use.
1143 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1145 struct packet_type *ptype;
1146 net_timestamp(&skb->stamp);
1149 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1150 /* Never send packets back to the socket
1151 * they originated from - MvS (miquels@drinkel.ow.org)
1153 if ((ptype->dev == dev || !ptype->dev) &&
1154 (ptype->af_packet_priv == NULL ||
1155 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1156 struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
1160 /* skb->nh should be correctly
1161 set by the sender, so the check below is
1162 just protection against buggy protocols.
1164 skb2->mac.raw = skb2->data;
1166 if (skb2->nh.raw < skb2->data ||
1167 skb2->nh.raw > skb2->tail) {
1168 if (net_ratelimit())
1169 printk(KERN_CRIT "protocol %04x is "
1171 "buggy, dev %s\n", skb2->protocol, dev->name);
1172 skb2->nh.raw = skb2->data;
1175 skb2->h.raw = skb2->nh.raw;
1176 skb2->pkt_type = PACKET_OUTGOING;
1177 ptype->func(skb2, skb->dev, ptype);
1183 /* Calculate the csum in the case when the packet is misrouted.
1184 * If that fails for some reason, ignore it and send the skb with a wrong checksum.
1187 struct sk_buff *skb_checksum_help(struct sk_buff *skb)
1190 int offset = skb->h.raw - skb->data;
1192 if (offset > (int)skb->len)
1194 csum = skb_checksum(skb, offset, skb->len-offset, 0);
1196 offset = skb->tail - skb->h.raw;
1199 if (skb->csum + 2 > offset)
1202 *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
1203 skb->ip_summed = CHECKSUM_NONE;
1207 #ifdef CONFIG_HIGHMEM
1208 /* Actually, we should eliminate this check as soon as we know that:
1209 * 1. An IOMMU is present and allows mapping all the memory, or
1210 * 2. No high memory really exists on this machine.
1213 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1217 if (dev->features & NETIF_F_HIGHDMA)
1220 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1221 if (skb_shinfo(skb)->frags[i].page >= highmem_start_page)
1227 #define illegal_highdma(dev, skb) (0)
1230 extern void skb_release_data(struct sk_buff *);
1232 /* Keep head the same: replace data */
1233 int __skb_linearize(struct sk_buff *skb, int gfp_mask)
1238 struct skb_shared_info *ninfo;
1239 int headerlen = skb->data - skb->head;
1240 int expand = (skb->tail + skb->data_len) - skb->end;
1242 if (skb_shared(skb))
1248 size = skb->end - skb->head + expand;
1249 size = SKB_DATA_ALIGN(size);
1250 data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
1254 /* Copy entire thing */
1255 if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
1259 ninfo = (struct skb_shared_info*)(data + size);
1260 atomic_set(&ninfo->dataref, 1);
1261 ninfo->tso_size = skb_shinfo(skb)->tso_size;
1262 ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
1263 ninfo->nr_frags = 0;
1264 ninfo->frag_list = NULL;
1266 /* Offset between the two in bytes */
1267 offset = data - skb->head;
1269 /* Free old data. */
1270 skb_release_data(skb);
1273 skb->end = data + size;
1275 /* Set up new pointers */
1276 skb->h.raw += offset;
1277 skb->nh.raw += offset;
1278 skb->mac.raw += offset;
1279 skb->tail += offset;
1280 skb->data += offset;
1282 /* We are no longer a clone, even if we were. */
1285 skb->tail += skb->data_len;
1291 * dev_queue_xmit - transmit a buffer
1292 * @skb: buffer to transmit
1294 * Queue a buffer for transmission to a network device. The caller must
1295 * have set the device and priority and built the buffer before calling
1296 * this function. The function can be called from an interrupt.
1298 * A negative errno code is returned on a failure. A success does not
1299 * guarantee the frame will be transmitted as it may be dropped due
1300 * to congestion or traffic shaping.
1303 int dev_queue_xmit(struct sk_buff *skb)
1305 struct net_device *dev = skb->dev;
1309 if (skb_shinfo(skb)->frag_list &&
1310 !(dev->features & NETIF_F_FRAGLIST) &&
1311 __skb_linearize(skb, GFP_ATOMIC))
1314 /* A fragmented skb is linearized if the device does not support SG,
1315 * or if at least one of the fragments is in highmem and the device
1316 * does not support DMA from it.
1318 if (skb_shinfo(skb)->nr_frags &&
1319 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
1320 __skb_linearize(skb, GFP_ATOMIC))
1323 /* If the packet is not checksummed and the device does not support
1324 * checksumming for this protocol, complete the checksum here.
1326 if (skb->ip_summed == CHECKSUM_HW &&
1327 (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
1328 (!(dev->features & NETIF_F_IP_CSUM) ||
1329 skb->protocol != htons(ETH_P_IP)))) {
1330 if ((skb = skb_checksum_help(skb)) == NULL)
1334 /* Grab device queue */
1335 spin_lock_bh(&dev->queue_lock);
1338 rc = q->enqueue(skb, q);
1342 spin_unlock_bh(&dev->queue_lock);
1343 rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
1347 /* The device has no queue. This is the common case for software devices:
1348 loopback, all sorts of tunnels...
1350 Really, it is unlikely that xmit_lock protection is necessary here
1351 (e.g. loopback and IP tunnels are clean, ignoring statistics counters).
1353 However, it is possible that they rely on the protection we make here,
1356 so check this and take the lock. It is not prone to deadlocks.
1357 Alternatively, shoot the noqueue qdisc; that is even simpler 8)
1359 if (dev->flags & IFF_UP) {
1360 int cpu = smp_processor_id();
1362 if (dev->xmit_lock_owner != cpu) {
1364 * The spin_lock effectively does a preempt lock, but
1365 * we are about to drop that...
1368 spin_unlock(&dev->queue_lock);
1369 spin_lock(&dev->xmit_lock);
1370 dev->xmit_lock_owner = cpu;
1373 if (!netif_queue_stopped(dev)) {
1375 dev_queue_xmit_nit(skb, dev);
1378 if (!dev->hard_start_xmit(skb, dev)) {
1379 dev->xmit_lock_owner = -1;
1380 spin_unlock_bh(&dev->xmit_lock);
1384 dev->xmit_lock_owner = -1;
1385 spin_unlock_bh(&dev->xmit_lock);
1386 if (net_ratelimit())
1387 printk(KERN_CRIT "Virtual device %s asks to "
1388 "queue packet!\n", dev->name);
1391 /* Recursion is detected! It is possible,
1393 if (net_ratelimit())
1394 printk(KERN_CRIT "Dead loop on virtual device "
1395 "%s, fix it urgently!\n", dev->name);
1398 spin_unlock_bh(&dev->queue_lock);
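/*
 * Caller sketch: the buffer must carry the target device and link-layer
 * framing before the call. dev_queue_xmit() consumes the skb whether or
 * not it succeeds, so the caller must not free it afterwards.
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);
 *	dev_queue_xmit(skb);
 */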
1408 /*=======================================================================
1410 =======================================================================*/
1412 int netdev_max_backlog = 300;
1413 int weight_p = 64; /* old backlog weight */
1414 /* These numbers are selected based on intuition and some
1415 * experimentation; if you have a more scientific way of doing this,
1416 * please go ahead and fix things.
1418 int no_cong_thresh = 10;
1423 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1426 #ifdef CONFIG_NET_HW_FLOWCONTROL
1427 atomic_t netdev_dropping = ATOMIC_INIT(0);
1428 static unsigned long netdev_fc_mask = 1;
1429 unsigned long netdev_fc_xoff;
1430 spinlock_t netdev_fc_lock = SPIN_LOCK_UNLOCKED;
1434 void (*stimul)(struct net_device *);
1435 struct net_device *dev;
1436 } netdev_fc_slots[BITS_PER_LONG];
1438 int netdev_register_fc(struct net_device *dev,
1439 void (*stimul)(struct net_device *dev))
1442 unsigned long flags;
1444 spin_lock_irqsave(&netdev_fc_lock, flags);
1445 if (netdev_fc_mask != ~0UL) {
1446 bit = ffz(netdev_fc_mask);
1447 netdev_fc_slots[bit].stimul = stimul;
1448 netdev_fc_slots[bit].dev = dev;
1449 set_bit(bit, &netdev_fc_mask);
1450 clear_bit(bit, &netdev_fc_xoff);
1452 spin_unlock_irqrestore(&netdev_fc_lock, flags);
1456 void netdev_unregister_fc(int bit)
1458 unsigned long flags;
1460 spin_lock_irqsave(&netdev_fc_lock, flags);
1462 netdev_fc_slots[bit].stimul = NULL;
1463 netdev_fc_slots[bit].dev = NULL;
1464 clear_bit(bit, &netdev_fc_mask);
1465 clear_bit(bit, &netdev_fc_xoff);
1467 spin_unlock_irqrestore(&netdev_fc_lock, flags);
1470 static void netdev_wakeup(void)
1474 spin_lock(&netdev_fc_lock);
1475 xoff = netdev_fc_xoff;
1480 netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
1482 spin_unlock(&netdev_fc_lock);
1486 static void get_sample_stats(int cpu)
1492 struct softnet_data *sd = &per_cpu(softnet_data, cpu);
1493 int blog = sd->input_pkt_queue.qlen;
1494 int avg_blog = sd->avg_blog;
1496 avg_blog = (avg_blog >> 1) + (blog >> 1);
1498 if (avg_blog > mod_cong) {
1499 /* Above moderate congestion levels. */
1500 sd->cng_level = NET_RX_CN_HIGH;
1503 rq = rd % netdev_max_backlog;
1504 if (rq < avg_blog) /* unlucky bastard */
1505 sd->cng_level = NET_RX_DROP;
1507 } else if (avg_blog > lo_cong) {
1508 sd->cng_level = NET_RX_CN_MOD;
1511 rq = rd % netdev_max_backlog;
1512 if (rq < avg_blog) /* unlucky bastard */
1513 sd->cng_level = NET_RX_CN_HIGH;
1515 } else if (avg_blog > no_cong)
1516 sd->cng_level = NET_RX_CN_LOW;
1517 else /* no congestion */
1518 sd->cng_level = NET_RX_SUCCESS;
1520 sd->avg_blog = avg_blog;
1523 #ifdef OFFLINE_SAMPLE
1524 static void sample_queue(unsigned long dummy)
1526 /* 10 ms or 1 ms -- I don't care -- JHS */
1528 int cpu = smp_processor_id();
1530 get_sample_stats(cpu);
1531 next_tick += jiffies;
1532 mod_timer(&samp_timer, next_tick);
1538 * netif_rx - post buffer to the network code
1539 * @skb: buffer to post
1541 * This function receives a packet from a device driver and queues it for
1542 * the upper (protocol) levels to process. It always succeeds. The buffer
1543 * may be dropped during processing for congestion control or by the protocol layers.
1547 * NET_RX_SUCCESS (no congestion)
1548 * NET_RX_CN_LOW (low congestion)
1549 * NET_RX_CN_MOD (moderate congestion)
1550 * NET_RX_CN_HIGH (high congestion)
1551 * NET_RX_DROP (packet was dropped)
1555 int netif_rx(struct sk_buff *skb)
1558 struct softnet_data *queue;
1559 unsigned long flags;
1561 #ifdef CONFIG_NETPOLL_RX
1562 if (skb->dev->netpoll_rx && netpoll_rx(skb)) {
1568 if (!skb->stamp.tv_sec)
1569 net_timestamp(&skb->stamp);
1572 * The code is rearranged so that the path is shortest
1573 * when the CPU is congested but still operating.
1575 local_irq_save(flags);
1576 this_cpu = smp_processor_id();
1577 queue = &__get_cpu_var(softnet_data);
1579 __get_cpu_var(netdev_rx_stat).total++;
1580 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1581 if (queue->input_pkt_queue.qlen) {
1582 if (queue->throttle)
1587 __skb_queue_tail(&queue->input_pkt_queue, skb);
1588 #ifndef OFFLINE_SAMPLE
1589 get_sample_stats(this_cpu);
1591 local_irq_restore(flags);
1592 return queue->cng_level;
1595 if (queue->throttle) {
1596 queue->throttle = 0;
1597 #ifdef CONFIG_NET_HW_FLOWCONTROL
1598 if (atomic_dec_and_test(&netdev_dropping))
1603 netif_rx_schedule(&queue->backlog_dev);
1607 if (!queue->throttle) {
1608 queue->throttle = 1;
1609 __get_cpu_var(netdev_rx_stat).throttled++;
1610 #ifdef CONFIG_NET_HW_FLOWCONTROL
1611 atomic_inc(&netdev_dropping);
1616 __get_cpu_var(netdev_rx_stat).dropped++;
1617 local_irq_restore(flags);
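/*
 * Typical driver receive path (sketch): the interrupt handler builds
 * the skb and hands it to netif_rx(), using the return value only as
 * a congestion hint.
 *
 *	skb = dev_alloc_skb(pkt_len + 2);
 *	if (skb) {
 *		skb_reserve(skb, 2);		(align the IP header)
 *		memcpy(skb_put(skb, pkt_len), data, pkt_len);
 *		skb->dev = dev;
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */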
1623 static __inline__ void skb_bond(struct sk_buff *skb)
1625 struct net_device *dev = skb->dev;
1628 skb->real_dev = skb->dev;
1629 skb->dev = dev->master;
1633 static void net_tx_action(struct softirq_action *h)
1635 struct softnet_data *sd = &__get_cpu_var(softnet_data);
1637 if (sd->completion_queue) {
1638 struct sk_buff *clist;
1640 local_irq_disable();
1641 clist = sd->completion_queue;
1642 sd->completion_queue = NULL;
1646 struct sk_buff *skb = clist;
1647 clist = clist->next;
1649 BUG_TRAP(!atomic_read(&skb->users));
1654 if (sd->output_queue) {
1655 struct net_device *head;
1657 local_irq_disable();
1658 head = sd->output_queue;
1659 sd->output_queue = NULL;
1663 struct net_device *dev = head;
1664 head = head->next_sched;
1666 smp_mb__before_clear_bit();
1667 clear_bit(__LINK_STATE_SCHED, &dev->state);
1669 if (spin_trylock(&dev->queue_lock)) {
1671 spin_unlock(&dev->queue_lock);
1673 netif_schedule(dev);
1679 static __inline__ int deliver_skb(struct sk_buff *skb,
1680 struct packet_type *pt_prev, int last)
1682 atomic_inc(&skb->users);
1683 return pt_prev->func(skb, skb->dev, pt_prev);
1687 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
1688 int (*br_handle_frame_hook)(struct sk_buff *skb);
1690 static __inline__ int handle_bridge(struct sk_buff *skb,
1691 struct packet_type *pt_prev)
1693 int ret = NET_RX_DROP;
1695 ret = deliver_skb(skb, pt_prev, 0);
1702 static inline int __handle_bridge(struct sk_buff *skb,
1703 struct packet_type **pt_prev, int *ret)
1705 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
1706 if (skb->dev->br_port && skb->pkt_type != PACKET_LOOPBACK) {
1707 *ret = handle_bridge(skb, *pt_prev);
1708 if (br_handle_frame_hook(skb) == 0)
1717 int netif_receive_skb(struct sk_buff *skb)
1719 struct packet_type *ptype, *pt_prev;
1720 int ret = NET_RX_DROP;
1721 unsigned short type;
1723 #ifdef CONFIG_NETPOLL_RX
1724 if (skb->dev->netpoll_rx && skb->dev->poll && netpoll_rx(skb)) {
1730 if (!skb->stamp.tv_sec)
1731 net_timestamp(&skb->stamp);
1735 __get_cpu_var(netdev_rx_stat).total++;
1737 #ifdef CONFIG_NET_FASTROUTE
1738 if (skb->pkt_type == PACKET_FASTROUTE) {
1739 __get_cpu_var(netdev_rx_stat).fastroute_deferred_out++;
1740 return dev_queue_xmit(skb);
1744 skb->h.raw = skb->nh.raw = skb->data;
1745 skb->mac_len = skb->nh.raw - skb->mac.raw;
1749 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1750 if (!ptype->dev || ptype->dev == skb->dev) {
1752 ret = deliver_skb(skb, pt_prev, 0);
1757 handle_diverter(skb);
1759 if (__handle_bridge(skb, &pt_prev, &ret))
1762 type = skb->protocol;
1763 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
1764 if (ptype->type == type &&
1765 (!ptype->dev || ptype->dev == skb->dev)) {
1767 ret = deliver_skb(skb, pt_prev, 0);
1773 ret = pt_prev->func(skb, skb->dev, pt_prev);
1776 /* Jamal, now you will not be able to escape explaining
1777 * to me how you were going to use this. :-)
1787 static int process_backlog(struct net_device *backlog_dev, int *budget)
1790 int quota = min(backlog_dev->quota, *budget);
1791 struct softnet_data *queue = &__get_cpu_var(softnet_data);
1792 unsigned long start_time = jiffies;
1795 struct sk_buff *skb;
1796 struct net_device *dev;
1798 local_irq_disable();
1799 skb = __skb_dequeue(&queue->input_pkt_queue);
1806 netif_receive_skb(skb);
1812 if (work >= quota || jiffies - start_time > 1)
1815 #ifdef CONFIG_NET_HW_FLOWCONTROL
1816 if (queue->throttle &&
1817 queue->input_pkt_queue.qlen < no_cong_thresh ) {
1818 queue->throttle = 0;
1819 if (atomic_dec_and_test(&netdev_dropping)) {
1827 backlog_dev->quota -= work;
1832 backlog_dev->quota -= work;
1835 list_del(&backlog_dev->poll_list);
1836 smp_mb__before_clear_bit();
1837 netif_poll_enable(backlog_dev);
1839 if (queue->throttle) {
1840 queue->throttle = 0;
1841 #ifdef CONFIG_NET_HW_FLOWCONTROL
1842 if (atomic_dec_and_test(&netdev_dropping))
1850 static void net_rx_action(struct softirq_action *h)
1852 struct softnet_data *queue = &__get_cpu_var(softnet_data);
1853 unsigned long start_time = jiffies;
1854 int budget = netdev_max_backlog;
1857 local_irq_disable();
1859 while (!list_empty(&queue->poll_list)) {
1860 struct net_device *dev;
1862 if (budget <= 0 || jiffies - start_time > 1)
1867 dev = list_entry(queue->poll_list.next,
1868 struct net_device, poll_list);
1870 if (dev->quota <= 0 || dev->poll(dev, &budget)) {
1871 local_irq_disable();
1872 list_del(&dev->poll_list);
1873 list_add_tail(&dev->poll_list, &queue->poll_list);
1875 dev->quota += dev->weight;
1877 dev->quota = dev->weight;
1880 local_irq_disable();
1888 __get_cpu_var(netdev_rx_stat).time_squeeze++;
1889 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
1893 static gifconf_func_t * gifconf_list [NPROTO];
1896 * register_gifconf - register a SIOCGIF handler
1897 * @family: Address family
1898 * @gifconf: Function handler
1900 * Register protocol dependent address dumping routines. The handler
1901 * that is passed must not be freed or reused until it has been replaced
1902 * by another handler.
1904 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
1906 if (family >= NPROTO)
1908 gifconf_list[family] = gifconf;
1914 * Map an interface index to its name (SIOCGIFNAME)
1918 * We need this ioctl for efficient implementation of the
1919 * if_indextoname() function required by the IPv6 API. Without
1920 * it, we would have to search all the interfaces to find a match.
1924 static int dev_ifname(struct ifreq __user *arg)
1926 struct net_device *dev;
1930 * Fetch the caller's info block.
1933 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
1936 read_lock(&dev_base_lock);
1937 dev = __dev_get_by_index(ifr.ifr_ifindex);
1939 read_unlock(&dev_base_lock);
1943 strcpy(ifr.ifr_name, dev->name);
1944 read_unlock(&dev_base_lock);
1946 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
1952 * Perform a SIOCGIFCONF call. This structure will change
1953 * size eventually, and there is nothing I can do about it.
1954 * Thus we will need a 'compatibility mode'.
1957 static int dev_ifconf(char __user *arg)
1960 struct net_device *dev;
1967 * Fetch the caller's info block.
1970 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
1977 * Loop over the interfaces, and write an info block for each.
1981 for (dev = dev_base; dev; dev = dev->next) {
1982 for (i = 0; i < NPROTO; i++) {
1983 if (gifconf_list[i]) {
1986 done = gifconf_list[i](dev, NULL, 0);
1988 done = gifconf_list[i](dev, pos + total,
1998 * All done. Write the updated control block back to the caller.
2000 ifc.ifc_len = total;
2003 * Both BSD and Solaris return 0 here, so we do too.
2005 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2008 #ifdef CONFIG_PROC_FS
2010 * This is invoked by the /proc filesystem handler to display a device in detail.
2013 static __inline__ struct net_device *dev_get_idx(loff_t pos)
2015 struct net_device *dev;
2018 for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next);
2020 return i == pos ? dev : NULL;
2023 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2025 read_lock(&dev_base_lock);
2026 return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN;
2029 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2032 return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
2035 void dev_seq_stop(struct seq_file *seq, void *v)
2037 read_unlock(&dev_base_lock);
2040 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2042 if (dev->get_stats) {
2043 struct net_device_stats *stats = dev->get_stats(dev);
2045 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2046 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2047 dev->name, stats->rx_bytes, stats->rx_packets,
2049 stats->rx_dropped + stats->rx_missed_errors,
2050 stats->rx_fifo_errors,
2051 stats->rx_length_errors + stats->rx_over_errors +
2052 stats->rx_crc_errors + stats->rx_frame_errors,
2053 stats->rx_compressed, stats->multicast,
2054 stats->tx_bytes, stats->tx_packets,
2055 stats->tx_errors, stats->tx_dropped,
2056 stats->tx_fifo_errors, stats->collisions,
2057 stats->tx_carrier_errors +
2058 stats->tx_aborted_errors +
2059 stats->tx_window_errors +
2060 stats->tx_heartbeat_errors,
2061 stats->tx_compressed);
2063 seq_printf(seq, "%6s: No statistics available.\n", dev->name);
2067 * Called from the PROCfs module. This now uses the new arbitrary sized
2068 * /proc/net interface to create /proc/net/dev
2070 static int dev_seq_show(struct seq_file *seq, void *v)
2072 if (v == SEQ_START_TOKEN)
2073 seq_puts(seq, "Inter-| Receive "
2075 " face |bytes packets errs drop fifo frame "
2076 "compressed multicast|bytes packets errs "
2077 "drop fifo colls carrier compressed\n");
2079 dev_seq_printf_stats(seq, v);
2083 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2085 struct netif_rx_stats *rc = NULL;
2087 while (*pos < NR_CPUS)
2088 if (cpu_online(*pos)) {
2089 rc = &per_cpu(netdev_rx_stat, *pos);
2096 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2098 return softnet_get_online(pos);
2101 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2104 return softnet_get_online(pos);
2107 static void softnet_seq_stop(struct seq_file *seq, void *v)
2111 static int softnet_seq_show(struct seq_file *seq, void *v)
2113 struct netif_rx_stats *s = v;
2115 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2116 s->total, s->dropped, s->time_squeeze, s->throttled,
2117 s->fastroute_hit, s->fastroute_success, s->fastroute_defer,
2118 s->fastroute_deferred_out,
2120 s->fastroute_latency_reduction
2128 static struct seq_operations dev_seq_ops = {
2129 .start = dev_seq_start,
2130 .next = dev_seq_next,
2131 .stop = dev_seq_stop,
2132 .show = dev_seq_show,
2135 static int dev_seq_open(struct inode *inode, struct file *file)
2137 return seq_open(file, &dev_seq_ops);
2140 static struct file_operations dev_seq_fops = {
2141 .owner = THIS_MODULE,
2142 .open = dev_seq_open,
2144 .llseek = seq_lseek,
2145 .release = seq_release,
2148 static struct seq_operations softnet_seq_ops = {
2149 .start = softnet_seq_start,
2150 .next = softnet_seq_next,
2151 .stop = softnet_seq_stop,
2152 .show = softnet_seq_show,
2155 static int softnet_seq_open(struct inode *inode, struct file *file)
2157 return seq_open(file, &softnet_seq_ops);
2160 static struct file_operations softnet_seq_fops = {
2161 .owner = THIS_MODULE,
2162 .open = softnet_seq_open,
2164 .llseek = seq_lseek,
2165 .release = seq_release,
2169 extern int wireless_proc_init(void);
2171 #define wireless_proc_init() 0
2174 static int __init dev_proc_init(void)
2178 if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops))
2180 if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
2182 if (wireless_proc_init())
2188 proc_net_remove("softnet_stat");
2190 proc_net_remove("dev");
2194 #define dev_proc_init() 0
2195 #endif /* CONFIG_PROC_FS */
2199 * netdev_set_master - set up master/slave pair
2200 * @slave: slave device
2201 * @master: new master device
2203 * Changes the master device of the slave. Pass %NULL to break the
2204 * bonding. The caller must hold the RTNL semaphore. On a failure
2205 * a negative errno code is returned. On success the reference counts
2206 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2207 * function returns zero.
2209 int netdev_set_master(struct net_device *slave, struct net_device *master)
2211 struct net_device *old = slave->master;
2221 slave->master = master;
2229 slave->flags |= IFF_SLAVE;
2231 slave->flags &= ~IFF_SLAVE;
2233 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2238 * dev_set_promiscuity - update promiscuity count on a device
2242 * Add or remove promiscuity from a device. While the count in the device
2243 * remains above zero the interface remains promiscuous. Once it hits zero
2244 * the device reverts back to normal filtering operation. A negative inc
2245 * value is used to drop promiscuity on the device.
2247 void dev_set_promiscuity(struct net_device *dev, int inc)
2249 unsigned short old_flags = dev->flags;
2251 dev->flags |= IFF_PROMISC;
2252 if ((dev->promiscuity += inc) == 0)
2253 dev->flags &= ~IFF_PROMISC;
2254 if (dev->flags ^ old_flags) {
2255 #ifdef CONFIG_NET_FASTROUTE
2256 if (dev->flags & IFF_PROMISC) {
2257 netdev_fastroute_obstacles++;
2258 dev_clear_fastroute(dev);
2260 netdev_fastroute_obstacles--;
2263 printk(KERN_INFO "device %s %s promiscuous mode\n",
2264 dev->name, (dev->flags & IFF_PROMISC) ? "entered" : "left");
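/*
 * Usage sketch: users take and drop promiscuity symmetrically, so
 * several independent users can coexist on one device.
 *
 *	dev_set_promiscuity(dev, 1);
 *	...
 *	dev_set_promiscuity(dev, -1);
 */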
2270 * dev_set_allmulti - update allmulti count on a device
2274 * Add or remove reception of all multicast frames to a device. While the
2275 * count in the device remains above zero the interface keeps listening
2276 * to all multicast frames. Once it hits zero the device reverts back to normal
2277 * filtering operation. A negative @inc value is used to drop the counter
2278 * when releasing a resource needing all multicasts.
2281 void dev_set_allmulti(struct net_device *dev, int inc)
2283 unsigned short old_flags = dev->flags;
2285 dev->flags |= IFF_ALLMULTI;
2286 if ((dev->allmulti += inc) == 0)
2287 dev->flags &= ~IFF_ALLMULTI;
2288 if (dev->flags ^ old_flags)
2292 unsigned dev_get_flags(const struct net_device *dev)
2296 flags = (dev->flags & ~(IFF_PROMISC |
2299 (dev->gflags & (IFF_PROMISC |
2302 if (netif_running(dev) && netif_carrier_ok(dev))
2303 flags |= IFF_RUNNING;
2308 int dev_change_flags(struct net_device *dev, unsigned flags)
2311 int old_flags = dev->flags;
2314 * Set the flags on our device.
2317 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
2318 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
2320 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
2324 * Load in the correct multicast list now that the flags have changed.
2330 * Have we downed the interface? We handle IFF_UP ourselves
2331 * according to user attempts to set it, rather than blindly setting it.
2336 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
2337 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
2343 if (dev->flags & IFF_UP &&
2344 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
2346 notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
2348 if ((flags ^ dev->gflags) & IFF_PROMISC) {
2349 int inc = (flags & IFF_PROMISC) ? +1 : -1;
2350 dev->gflags ^= IFF_PROMISC;
2351 dev_set_promiscuity(dev, inc);
2354 /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
2355 is important. Some (broken) drivers set IFF_PROMISC when
2356 IFF_ALLMULTI is requested, without asking us and without reporting it.
2358 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
2359 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
2360 dev->gflags ^= IFF_ALLMULTI;
2361 dev_set_allmulti(dev, inc);
2364 if (old_flags ^ dev->flags)
2365 rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);
2370 int dev_set_mtu(struct net_device *dev, int new_mtu)
2374 if (new_mtu == dev->mtu)
2377 /* MTU must be positive. */
2381 if (!netif_device_present(dev))
2385 if (dev->change_mtu)
2386 err = dev->change_mtu(dev, new_mtu);
2389 if (!err && dev->flags & IFF_UP)
2390 notifier_call_chain(&netdev_chain,
2391 NETDEV_CHANGEMTU, dev);
2397 * Perform the SIOCxIFxxx calls.
2399 static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
2402 struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
2408 case SIOCGIFFLAGS: /* Get interface flags */
2409 ifr->ifr_flags = dev_get_flags(dev);
2412 case SIOCSIFFLAGS: /* Set interface flags */
2413 return dev_change_flags(dev, ifr->ifr_flags);
2415 case SIOCGIFMETRIC: /* Get the metric on the interface
2416 (currently unused) */
2417 ifr->ifr_metric = 0;
2420 case SIOCSIFMETRIC: /* Set the metric on the interface
2421 (currently unused) */
2424 case SIOCGIFMTU: /* Get the MTU of a device */
2425 ifr->ifr_mtu = dev->mtu;
2428 case SIOCSIFMTU: /* Set the MTU of a device */
2429 return dev_set_mtu(dev, ifr->ifr_mtu);
2432 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
2433 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2434 ifr->ifr_hwaddr.sa_family = dev->type;
2438 if (!dev->set_mac_address)
2440 if (ifr->ifr_hwaddr.sa_family != dev->type)
2442 if (!netif_device_present(dev))
2444 err = dev->set_mac_address(dev, &ifr->ifr_hwaddr);
2446 notifier_call_chain(&netdev_chain,
2447 NETDEV_CHANGEADDR, dev);
2450 case SIOCSIFHWBROADCAST:
2451 if (ifr->ifr_hwaddr.sa_family != dev->type)
2453 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
2454 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2455 notifier_call_chain(&netdev_chain,
2456 NETDEV_CHANGEADDR, dev);
2460 ifr->ifr_map.mem_start = dev->mem_start;
2461 ifr->ifr_map.mem_end = dev->mem_end;
2462 ifr->ifr_map.base_addr = dev->base_addr;
2463 ifr->ifr_map.irq = dev->irq;
2464 ifr->ifr_map.dma = dev->dma;
2465 ifr->ifr_map.port = dev->if_port;
2469 if (dev->set_config) {
2470 if (!netif_device_present(dev))
2472 return dev->set_config(dev, &ifr->ifr_map);
2477 if (!dev->set_multicast_list ||
2478 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2480 if (!netif_device_present(dev))
2482 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
2486 if (!dev->set_multicast_list ||
2487 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2489 if (!netif_device_present(dev))
2491 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
2495 ifr->ifr_ifindex = dev->ifindex;
2499 ifr->ifr_qlen = dev->tx_queue_len;
2503 if (ifr->ifr_qlen < 0)
2505 dev->tx_queue_len = ifr->ifr_qlen;
2509 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
2510 return dev_change_name(dev, ifr->ifr_newname);
2513 * Unknown or private ioctl
2517 if ((cmd >= SIOCDEVPRIVATE &&
2518 cmd <= SIOCDEVPRIVATE + 15) ||
2519 cmd == SIOCBONDENSLAVE ||
2520 cmd == SIOCBONDRELEASE ||
2521 cmd == SIOCBONDSETHWADDR ||
2522 cmd == SIOCBONDSLAVEINFOQUERY ||
2523 cmd == SIOCBONDINFOQUERY ||
2524 cmd == SIOCBONDCHANGEACTIVE ||
2525 cmd == SIOCGMIIPHY ||
2526 cmd == SIOCGMIIREG ||
2527 cmd == SIOCSMIIREG ||
2528 cmd == SIOCBRADDIF ||
2529 cmd == SIOCBRDELIF ||
2530 cmd == SIOCWANDEV) {
2532 if (dev->do_ioctl) {
2533 if (netif_device_present(dev))
2534 err = dev->do_ioctl(dev, ifr,
/*
 *	This function handles all "interface"-type I/O control requests. The
 *	actual 'doing' part of this is dev_ifsioc above.
 */

/**
 *	dev_ioctl	-	network device ioctl
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes. The return value is the return from the syscall if
 *	positive or a negative errno code on error.
 */
int dev_ioctl(unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes ifconf argument
	   and requires shared lock, because it sleeps writing
	   to user space.
	 */

	if (cmd == SIOCGIFCONF) {
		rtnl_shlock();
		ret = dev_ifconf((char __user *) arg);
		rtnl_shunlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname((struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */
	switch (cmd) {
		/*
		 *	These ioctl calls:
		 *	- can be done by all.
		 *	- atomic and do not require locking.
		 *	- return a value
		 */
		case SIOCGIFFLAGS:
		case SIOCGIFMETRIC:
		case SIOCGIFMTU:
		case SIOCGIFHWADDR:
		case SIOCGIFSLAVE:
		case SIOCGIFMAP:
		case SIOCGIFINDEX:
		case SIOCGIFTXQLEN:
			dev_load(ifr.ifr_name);
			read_lock(&dev_base_lock);
			ret = dev_ifsioc(&ifr, cmd);
			read_unlock(&dev_base_lock);
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;

		case SIOCETHTOOL:
			dev_load(ifr.ifr_name);
			rtnl_lock();
			ret = dev_ethtool(&ifr);
			rtnl_unlock();
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;
		/*
		 *	These ioctl calls:
		 *	- require superuser power.
		 *	- require strict serialization.
		 *	- return a value
		 */
		case SIOCGMIIPHY:
		case SIOCGMIIREG:
		case SIOCSIFNAME:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			dev_load(ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(&ifr, cmd);
			rtnl_unlock();
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;
		/*
		 *	These ioctl calls:
		 *	- require superuser power.
		 *	- require strict serialization.
		 *	- do not return a value
		 */
		case SIOCSIFFLAGS:
		case SIOCSIFMETRIC:
		case SIOCSIFMTU:
		case SIOCSIFMAP:
		case SIOCSIFHWADDR:
		case SIOCSIFSLAVE:
		case SIOCADDMULTI:
		case SIOCDELMULTI:
		case SIOCSIFHWBROADCAST:
		case SIOCSIFTXQLEN:
		case SIOCSMIIREG:
		case SIOCBONDENSLAVE:
		case SIOCBONDRELEASE:
		case SIOCBONDSETHWADDR:
		case SIOCBONDSLAVEINFOQUERY:
		case SIOCBONDINFOQUERY:
		case SIOCBONDCHANGEACTIVE:
		case SIOCBRADDIF:
		case SIOCBRDELIF:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			dev_load(ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(&ifr, cmd);
			rtnl_unlock();
			return ret;

		case SIOCGIFMEM:
			/* Get the per device memory space. We can add this but
			 * currently do not support it */
		case SIOCSIFMEM:
			/* Set the per device memory buffer space.
			 * Not applicable in our case */
		case SIOCSIFLINK:
			return -EINVAL;
		/*
		 *	Unknown or private ioctl.
		 */
		default:
			if (cmd == SIOCWANDEV ||
			    (cmd >= SIOCDEVPRIVATE &&
			     cmd <= SIOCDEVPRIVATE + 15)) {
				dev_load(ifr.ifr_name);
				rtnl_lock();
				ret = dev_ifsioc(&ifr, cmd);
				rtnl_unlock();
				if (!ret && copy_to_user(arg, &ifr,
							 sizeof(struct ifreq)))
					ret = -EFAULT;
				return ret;
			}
#ifdef WIRELESS_EXT
			/* Take care of Wireless Extensions */
			if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
				/* If command is `set a parameter', or
				 * `get the encoding parameters', check if
				 * the user has the right to do it */
				if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) {
					if (!capable(CAP_NET_ADMIN))
						return -EPERM;
				}
				dev_load(ifr.ifr_name);
				rtnl_lock();
				/* Follow me in net/core/wireless.c */
				ret = wireless_process_ioctl(&ifr, cmd);
				rtnl_unlock();
				if (!ret && IW_IS_GET(cmd) &&
				    copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
				return ret;
			}
#endif	/* WIRELESS_EXT */
			return -EINVAL;
	}
}
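/*
 * Usage sketch (a minimal, hypothetical userspace caller, not part of
 * this file): dev_ioctl() is normally reached through the ioctl(2)
 * syscall on any socket; dev_ifsioc() fills in the returned ifreq.
 * For example, to read an interface MTU:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *
 *	int get_mtu(const char *name)
 *	{
 *		struct ifreq ifr;
 *		int mtu = -1;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *		if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *			mtu = ifr.ifr_mtu;
 *		close(fd);
 *		return mtu;
 *	}
 */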
/**
 *	dev_new_index	-	allocate an ifindex
 *
 *	Returns a suitable unique value for a new device interface
 *	number. The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
int dev_new_index(void)
{
	static int ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(ifindex))
			return ifindex;
	}
}
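/*
 * Usage sketch (hypothetical caller, not part of this file): the
 * returned index stays unique only while the lock is held, so assign
 * it to the device inside the same critical section, as
 * register_netdevice() does below:
 *
 *	rtnl_lock();
 *	dev->ifindex = dev_new_index();
 *	rtnl_unlock();
 */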
static int dev_boot_phase = 1;

/* Delayed registration/unregistration */
static spinlock_t net_todo_list_lock = SPIN_LOCK_UNLOCKED;
static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);

static inline void net_set_todo(struct net_device *dev)
{
	spin_lock(&net_todo_list_lock);
	list_add_tail(&dev->todo_list, &net_todo_list);
	spin_unlock(&net_todo_list_lock);
}
/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. See the comment at the
 *	end of Space.c for details about the locking. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */
int register_netdevice(struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *p;
	int ret;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);

	spin_lock_init(&dev->queue_lock);
	spin_lock_init(&dev->xmit_lock);
	dev->xmit_lock_owner = -1;
#ifdef CONFIG_NET_FASTROUTE
	dev->fastpath_lock = RW_LOCK_UNLOCKED;
#endif

	ret = alloc_divert_blk(dev);
	if (ret)
		goto out;

	dev->iflink = -1;

	/* Init, if this function is available */
	if (dev->init) {
		ret = dev->init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out_err;
		}
	}

	if (!dev_valid_name(dev->name)) {
		ret = -EINVAL;
		goto out_err;
	}

	dev->ifindex = dev_new_index();
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Check for existence of name */
	head = dev_name_hash(dev->name);
	hlist_for_each(p, head) {
		struct net_device *d
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
			ret = -EEXIST;
			goto out_err;
		}
	}

	/* Fix illegal SG+CSUM combinations. */
	if ((dev->features & NETIF_F_SG) &&
	    !(dev->features & (NETIF_F_IP_CSUM |
			       NETIF_F_NO_CSUM |
			       NETIF_F_HW_CSUM))) {
		printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
		       dev->name);
		dev->features &= ~NETIF_F_SG;
	}

	/*
	 *	nil rebuild_header routine,
	 *	that should be never called and used as just bug trap.
	 */

	if (!dev->rebuild_header)
		dev->rebuild_header = default_rebuild_header;

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev->next = NULL;
	dev_init_scheduler(dev);
	write_lock_bh(&dev_base_lock);
	*dev_tail = dev;
	dev_tail = &dev->next;
	hlist_add_head(&dev->name_hlist, head);
	hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
	dev_hold(dev);
	dev->reg_state = NETREG_REGISTERING;
	write_unlock_bh(&dev_base_lock);

	/* Notify protocols, that a new device appeared. */
	notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);

	/* Finish registration after unlock */
	net_set_todo(dev);
	ret = 0;

out:
	return ret;
out_err:
	free_divert_blk(dev);
	goto out;
}
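/*
 * Usage sketch (a hypothetical driver, not part of this file): most
 * drivers reach this through register_netdev(), which wraps the call
 * in rtnl_lock()/rtnl_unlock() as required above; my_* names are
 * assumed driver methods:
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));
 *
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->open = my_open;
 *	dev->stop = my_stop;
 *	dev->hard_start_xmit = my_xmit;
 *	if (register_netdev(dev)) {
 *		free_netdev(dev);
 *		return -ENODEV;
 *	}
 */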
/**
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;

	rebroadcast_time = warning_time = jiffies;
	while (atomic_read(&dev->refcnt) != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_shlock();

			/* Rebroadcast unregister notification */
			notifier_call_chain(&netdev_chain,
					    NETDEV_UNREGISTER, dev);

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			rtnl_shunlock();

			rebroadcast_time = jiffies;
		}

		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(HZ / 4);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			printk(KERN_EMERG "unregister_netdevice: "
			       "waiting for %s to become free. Usage "
			       "count = %d\n",
			       dev->name, atomic_read(&dev->refcnt));
			warning_time = jiffies;
		}
	}
}
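/*
 * Sketch of a well-behaved reference holder (hypothetical code, not
 * part of this file): a protocol that caches a dev reference registers
 * a notifier and drops the reference on NETDEV_UNREGISTER, so the loop
 * above can terminate; my_cached_dev and my_nb are assumed names:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UNREGISTER && dev == my_cached_dev) {
 *			dev_put(my_cached_dev);
 *			my_cached_dev = NULL;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 */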
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock() after it drops the semaphore.
 * This allows us to deal with problems:
 * 1) We can create/delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 */
static DECLARE_MUTEX(net_todo_run_mutex);
void netdev_run_todo(void)
{
	struct list_head list = LIST_HEAD_INIT(list);
	int err;

	/* Need to guard against multiple cpu's getting out of order. */
	down(&net_todo_run_mutex);

	/* Not safe to do outside the semaphore. We must not return
	 * until all unregister events invoked by the local processor
	 * have been completed (either by this todo run, or one on
	 * another cpu).
	 */
	if (list_empty(&net_todo_list))
		goto out;

	/* Snapshot list, allow later requests */
	spin_lock(&net_todo_list_lock);
	list_splice_init(&net_todo_list, &list);
	spin_unlock(&net_todo_list_lock);

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_entry(list.next, struct net_device, todo_list);
		list_del(&dev->todo_list);

		switch(dev->reg_state) {
		case NETREG_REGISTERING:
			err = netdev_register_sysfs(dev);
			if (err)
				printk(KERN_ERR "%s: failed sysfs registration (%d)\n",
				       dev->name, err);
			dev->reg_state = NETREG_REGISTERED;
			break;

		case NETREG_UNREGISTERING:
			netdev_unregister_sysfs(dev);
			dev->reg_state = NETREG_UNREGISTERED;

			netdev_wait_allrefs(dev);

			/* paranoia */
			BUG_ON(atomic_read(&dev->refcnt));
			BUG_TRAP(!dev->ip_ptr);
			BUG_TRAP(!dev->ip6_ptr);
			BUG_TRAP(!dev->dn_ptr);

			/* It must be the very last action,
			 * after this 'dev' may point to freed up memory.
			 */
			if (dev->destructor)
				dev->destructor(dev);
			break;

		default:
			printk(KERN_ERR "network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			break;
		}
	}

out:
	up(&net_todo_run_mutex);
}
/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
#ifdef CONFIG_SYSFS
	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via class release */
	class_device_put(&dev->class_dev);
#else
	kfree((char *)dev - dev->padded);
#endif
}
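/*
 * Usage sketch (hypothetical error path, not part of this file): a
 * device that was allocated but never successfully registered must
 * still go through free_netdev(), never a bare kfree(), so the
 * alignment padding and sysfs state are handled; my_priv and my_setup
 * are assumed names:
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "my%d", my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */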
/* Synchronize with packet receive processing. */
void synchronize_net(void)
{
	might_sleep();
	synchronize_kernel();
}
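/*
 * Usage sketch (hypothetical, not part of this file): a caller that
 * has just unhooked a handler waits out in-flight receive paths
 * before freeing state the handler used; my_packet_type and my_state
 * are assumed names:
 *
 *	dev_remove_pack(&my_packet_type);
 *	synchronize_net();
 *	kfree(my_state);
 */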
/**
 *	unregister_netdevice - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables. On success 0 is returned, on a failure
 *	a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore. See the comment at the
 *	end of Space.c for details about the locking. You may want
 *	unregister_netdev() instead of this.
 */
int unregister_netdevice(struct net_device *dev)
{
	struct net_device *d, **dp;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	/* Some devices call without registering for initialization unwind. */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
				  "was registered\n", dev->name, dev);
		return -ENODEV;
	}

	BUG_ON(dev->reg_state != NETREG_REGISTERED);

	/* If device is running, close it first. */
	if (dev->flags & IFF_UP)
		dev_close(dev);

	/* And unlink it from device chain. */
	for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
		if (d == dev) {
			write_lock_bh(&dev_base_lock);
			hlist_del(&dev->name_hlist);
			hlist_del(&dev->index_hlist);
			if (dev_tail == &dev->next)
				dev_tail = dp;
			*dp = d->next;
			write_unlock_bh(&dev_base_lock);
			break;
		}
	}
	if (!d) {
		printk(KERN_ERR "unregister net_device: '%s' not found\n",
		       dev->name);
		return -ENODEV;
	}

	dev->reg_state = NETREG_UNREGISTERING;

	synchronize_net();

#ifdef CONFIG_NET_FASTROUTE
	dev_clear_fastroute(dev);
#endif

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.
	*/
	notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);

	/*
	 *	Flush the multicast chain
	 */
	dev_mc_discard(dev);

	if (dev->uninit)
		dev->uninit(dev);

	/* Notifier chain MUST detach us from master device. */
	BUG_TRAP(!dev->master);

	free_divert_blk(dev);

	/* Finish processing unregister after unlock */
	net_set_todo(dev);

	synchronize_net();

	dev_put(dev);
	return 0;
}
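/*
 * Usage sketch (hypothetical, not part of this file): the full
 * teardown pairs this call with free_netdev() once netdev_run_todo()
 * has finished with the device; unregister_netdev() wraps the locking
 * for the common case:
 *
 *	rtnl_lock();
 *	unregister_netdevice(dev);
 *	rtnl_unlock();
 *	free_netdev(dev);
 */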
#ifdef CONFIG_HOTPLUG_CPU
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct net_device **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_sysfs_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
		INIT_HLIST_HEAD(&dev_name_head[i]);

	for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
		INIT_HLIST_HEAD(&dev_index_head[i]);

	/*
	 *	Initialise the packet receive queues.
	 */

	for (i = 0; i < NR_CPUS; i++) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->throttle = 0;
		queue->cng_level = 0;
		queue->avg_blog = 10; /* arbitrary non-zero */
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);
		set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
		queue->backlog_dev.weight = weight_p;
		queue->backlog_dev.poll = process_backlog;
		atomic_set(&queue->backlog_dev.refcnt, 1);
	}

#ifdef OFFLINE_SAMPLE
	samp_timer.expires = jiffies + (10 * HZ);
	add_timer(&samp_timer);
#endif

	dev_boot_phase = 0;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
EXPORT_SYMBOL(__dev_get);
EXPORT_SYMBOL(__dev_get_by_flags);
EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(__skb_linearize);
EXPORT_SYMBOL(call_netdevice_notifiers);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_getbyhwaddr);
EXPORT_SYMBOL(dev_ioctl);
EXPORT_SYMBOL(dev_new_index);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_queue_xmit_nit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
#endif
/* for 802.1Q VLAN support */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
EXPORT_SYMBOL(dev_change_flags);
#endif
#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif
#ifdef CONFIG_NET_HW_FLOWCONTROL
EXPORT_SYMBOL(netdev_dropping);
EXPORT_SYMBOL(netdev_fc_xoff);
EXPORT_SYMBOL(netdev_register_fc);
EXPORT_SYMBOL(netdev_unregister_fc);
#endif
#ifdef CONFIG_NET_FASTROUTE
EXPORT_SYMBOL(netdev_fastroute);
EXPORT_SYMBOL(netdev_fastroute_obstacles);
#endif

EXPORT_PER_CPU_SYMBOL(softnet_data);