/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					minimum space.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki   :	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/divert.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#ifdef CONFIG_NET_RADIO
#include <linux/wireless.h>		/* Note : will define WIRELESS_EXT */
#include <net/iw_handler.h>
#endif	/* CONFIG_NET_RADIO */
#include <asm/current.h>
#include <linux/vs_network.h>

/* This define, if set, will randomly drop a packet when congestion
 * is more than moderate.  It helps fairness in the multi-interface
 * case when one of them is a hog, but it kills performance for the
 * single interface case so it is off now by default.
 */
#undef RAND_LIE

/* Setting this will sample the queue lengths and thus congestion
 * via a timer instead of as each packet is received.
 */
#undef OFFLINE_SAMPLE

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */

static spinlock_t ptype_lock = SPIN_LOCK_UNLOCKED;
static struct list_head ptype_base[16];	/* 16 way hashed list */
static struct list_head ptype_all;	/* Taps */

#ifdef OFFLINE_SAMPLE
static void sample_queue(unsigned long dummy);
static struct timer_list samp_timer = TIMER_INITIALIZER(sample_queue, 0, 0);
#endif

/*
 *	The @dev_base list is protected by @dev_base_lock and the rtnl
 *	semaphore.
 *
 *	Pure readers hold dev_base_lock for reading.
 *
 *	Writers must hold the rtnl semaphore while they loop through the
 *	dev_base list, and hold dev_base_lock for writing when they do the
 *	actual updates.  This allows pure readers to access the list even
 *	while a writer is preparing to update it.
 *
 *	To put it another way, dev_base_lock is held for writing only to
 *	protect against pure readers; the rtnl semaphore provides the
 *	protection against other writers.
 *
 *	See, for example usages, register_netdevice() and
 *	unregister_netdevice(), which must be called with the rtnl
 *	semaphore held.
 */

struct net_device *dev_base;
struct net_device **dev_tail = &dev_base;
rwlock_t dev_base_lock = RW_LOCK_UNLOCKED;

EXPORT_SYMBOL(dev_base);
EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];

static inline struct hlist_head *dev_name_hash(const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
}

static inline struct hlist_head *dev_index_hash(int ifindex)
{
	return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
}

static struct notifier_block *netdev_chain;

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */
DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, };

#ifdef CONFIG_SYSFS
extern int netdev_sysfs_init(void);
extern int netdev_register_sysfs(struct net_device *);
extern void netdev_unregister_sysfs(struct net_device *);
#else
#define netdev_sysfs_init()		(0)
#define netdev_register_sysfs(dev)	(0)
#define netdev_unregister_sysfs(dev)	do { } while(0)
#endif

/* netdump function */
void (*netdump_func) (struct pt_regs *regs) = NULL;

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & 15;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

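/*
 * Illustrative sketch, not part of the original file: this is the shape
 * of a typical dev_add_pack() caller.  The handler "sample_rcv" and the
 * module hook "sample_init" are hypothetical names; ETH_P_IP is used
 * purely as an example protocol.
 */
#if 0
static int sample_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt)
{
	/* Consume the (possibly cloned) skb; a real handler would parse it. */
	kfree_skb(skb);
	return 0;
}

static struct packet_type sample_packet_type = {
	.type = __constant_htons(ETH_P_IP),	/* hashed into ptype_base */
	.func = sample_rcv,			/* called per matching skb */
};

static int __init sample_init(void)
{
	dev_add_pack(&sample_packet_type);
	return 0;
}
#endif
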
extern void linkwatch_run_queue(void);

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & 15];

	list_for_each_entry(pt1, head, list) {
		if (pt1 == pt) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

/*******************************************************************************

		Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strcpy(s[i].name, name);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}

/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

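/*
 * Illustrative sketch, not part of the original file: given the parsing
 * above, a kernel command line option such as
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth0
 *
 * would be stored by netdev_boot_setup_add() as an ifmap with irq=9,
 * base_addr=0x300, mem_start=0xd0000, mem_end=0xd4000 under the name
 * "eth0", for netdev_boot_setup_check() to apply at probe time.  The
 * specific values here are made up for the example.
 */
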
/*******************************************************************************

		Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

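/*
 * Illustrative sketch, not part of the original file: the reference
 * discipline dev_get_by_name() imposes on callers.  "sample_lookup" is
 * a hypothetical function.
 */
#if 0
static void sample_lookup(void)
{
	struct net_device *dev = dev_get_by_name("eth0");

	if (dev) {
		printk(KERN_INFO "found %s, ifindex %d\n",
		       dev->name, dev->ifindex);
		dev_put(dev);	/* drop the reference taken by the lookup */
	}
}
#endif
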
/*
   Return value is changed to int to prevent illegal usage in future.
   It is still legal to use to check for device existence.

   User should understand that the result returned by this function
   is meaningless if it was not issued under the rtnl semaphore.
 */

/**
 *	dev_get - test if a device exists
 *	@name:	name to test for
 *
 *	Test if a name exists. Returns true if the name is found. In order
 *	to be sure the name is not allocated or removed during the test the
 *	caller must hold the rtnl semaphore.
 *
 *	This function exists only for back compatibility with older
 *	callers.
 */
int __dev_get(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	read_unlock(&dev_base_lock);
	return dev != NULL;
}

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns %NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for (dev = dev_base; dev; dev = dev->next)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			break;
	return dev;
}

struct net_device *__dev_getfirstbyhwtype(unsigned short type)
{
	struct net_device *dev;

	for (dev = dev_base; dev; dev = dev->next)
		if (dev->type == type)
			break;
	return dev;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns %NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_flags(if_flags, mask);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns %NULL if a device
 *	is not found or a pointer to the device. The caller must hold either
 *	the RTNL semaphore or @dev_base_lock.
 */

struct net_device *__dev_get_by_flags(unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev;

	for (dev = dev_base; dev != NULL; dev = dev->next) {
		if (((dev->flags ^ if_flags) & mask) == 0)
			return dev;
	}
	return NULL;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.
 */
int dev_valid_name(const char *name)
{
	return !(*name == '\0'
		 || !strcmp(name, ".")
		 || !strcmp(name, "..")
		 || strchr(name, '/'));
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. Not efficient for many devices, not called a lot. The caller
 *	must hold the dev_base or rtnl lock while allocating the name and
 *	adding the device in order to avoid duplicates. Returns the number
 *	of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	int i = 0;
	char buf[IFNAMSIZ];
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for (d = dev_base; d; d = d->next) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, sizeof(buf), name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, sizeof(buf), name, i);
	if (!__dev_get_by_name(buf)) {
		strlcpy(dev->name, buf, IFNAMSIZ);
		return i;
	}

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	int err = 0;

	ASSERT_RTNL();

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

	err = class_device_rename(&dev->class_dev, dev->name);
	if (!err) {
		hlist_del(&dev->name_hlist);
		hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
		notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
	}

	return err;
}

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

/**
 *	dev_load 	- load a network module
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

static int default_rebuild_header(struct sk_buff *skb)
{
	printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
	       skb->dev ? skb->dev->name : "NULL!!!");
	kfree_skb(skb);
	return 1;
}

/*
 * Some old buggy device drivers change get_stats after registering
 * the device. Try and trap them here.
 * This can be eliminated when all devices are known fixed.
 */
static inline int get_stats_changed(struct net_device *dev)
{
	int changed = dev->last_stats != dev->get_stats;
	dev->last_stats = dev->get_stats;
	return changed;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Check for broken device drivers.
	 */
	if (get_stats_changed(dev) && net_ratelimit()) {
		printk(KERN_ERR "%s: driver changed get_stats after register\n",
		       dev->name);
	}

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);
	if (dev->open) {
		ret = dev->open(dev);
		if (ret)
			clear_bit(__LINK_STATE_START, &dev->state);
	}

	/*
	 *	Check for more broken device drivers.
	 */
	if (get_stats_changed(dev) && net_ratelimit()) {
		printk(KERN_ERR "%s: driver changed get_stats in open\n",
		       dev->name);
	}

	/*
	 *	If it went open OK then:
	 */
	if (!ret) {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_mc_upload(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return ret;
}

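/*
 * Illustrative sketch, not part of the original file: the kind of
 * driver-private open method that dev_open() invokes through dev->open.
 * "sample_open" is a hypothetical name; hardware setup is elided.
 */
#if 0
static int sample_open(struct net_device *dev)
{
	/* Bring the hardware up here; returning nonzero makes dev_open()
	 * clear __LINK_STATE_START again and abort the up transition. */
	netif_start_queue(dev);	/* allow dev_queue_xmit() to proceed */
	return 0;
}
#endif
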
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for it while the device is still operating.
	 */
	notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);

	dev_deactivate(dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running(),
	 * and wait when poll really will happen. Actually, the best place
	 * for this is inside dev->stop() after device stopped its irq
	 * engine, but this requires more changes in devices. */

	smp_mb__after_clear_bit(); /* Commit netif_running(). */
	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
		/* No hurry. */
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(1);
	}

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);

	return 0;
}

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered, all registration and up events are replayed
 *	to the new notifier to allow it to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	int err;

	rtnl_lock();
	err = notifier_chain_register(&netdev_chain, nb);
	if (!err) {
		for (dev = dev_base; dev; dev = dev->next) {
			nb->notifier_call(nb, NETDEV_REGISTER, dev);

			if (dev->flags & IFF_UP)
				nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}
	rtnl_unlock();
	return err;
}

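/*
 * Illustrative sketch, not part of the original file: a subsystem
 * watching for interfaces coming up.  "sample_event" and
 * "sample_notifier" are hypothetical names.
 */
#if 0
static int sample_event(struct notifier_block *nb, unsigned long event,
			void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		printk(KERN_INFO "%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block sample_notifier = {
	.notifier_call = sample_event,
};

/* register_netdevice_notifier(&sample_notifier) replays REGISTER and UP
 * events for already-existing devices, so no device is missed. */
#endif
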
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netdev_chain, nb);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@v:   pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, void *v)
{
	return notifier_call_chain(&netdev_chain, val, v);
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	net_timestamp(&skb->stamp);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb2->mac.raw = skb2->data;

			if (skb2->nh.raw < skb2->data ||
			    skb2->nh.raw > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb2->nh.raw = skb2->data;
			}

			skb2->h.raw = skb2->nh.raw;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype);
		}
	}
	rcu_read_unlock();
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff **pskb, int inward)
{
	unsigned int csum;
	int ret = 0, offset = (*pskb)->h.raw - (*pskb)->data;

	if (inward) {
		(*pskb)->ip_summed = CHECKSUM_NONE;
		goto out;
	}

	if (skb_shared(*pskb) || skb_cloned(*pskb)) {
		struct sk_buff *newskb = skb_copy(*pskb, GFP_ATOMIC);
		if (!newskb) {
			ret = -ENOMEM;
			goto out;
		}
		if ((*pskb)->sk)
			skb_set_owner_w(newskb, (*pskb)->sk);
		kfree_skb(*pskb);
		*pskb = newskb;
	}

	if (offset > (int)(*pskb)->len)
		BUG();
	csum = skb_checksum(*pskb, offset, (*pskb)->len-offset, 0);

	offset = (*pskb)->tail - (*pskb)->h.raw;
	if (offset <= 0)
		BUG();
	if ((*pskb)->csum + 2 > offset)
		BUG();

	*(u16*)((*pskb)->h.raw + (*pskb)->csum) = csum_fold(csum);
	(*pskb)->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}

#ifdef CONFIG_HIGHMEM
/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (skb_shinfo(skb)->frags[i].page >= highmem_start_page)
			return 1;

	return 0;
}
#else
#define illegal_highdma(dev, skb)	(0)
#endif

extern void skb_release_data(struct sk_buff *);

/* Keep head the same: replace data */
int __skb_linearize(struct sk_buff *skb, int gfp_mask)
{
	unsigned int size;
	u8 *data;
	long offset;
	struct skb_shared_info *ninfo;
	int headerlen = skb->data - skb->head;
	int expand = (skb->tail + skb->data_len) - skb->end;

	if (skb_shared(skb))
		BUG();

	if (expand <= 0)
		expand = 0;

	size = skb->end - skb->head + expand;
	size = SKB_DATA_ALIGN(size);
	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		return -ENOMEM;

	/* Copy entire thing */
	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
		BUG();

	/* Set up shinfo */
	ninfo = (struct skb_shared_info*)(data + size);
	atomic_set(&ninfo->dataref, 1);
	ninfo->tso_size = skb_shinfo(skb)->tso_size;
	ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
	ninfo->nr_frags = 0;
	ninfo->frag_list = NULL;

	/* Offset between the two in bytes */
	offset = data - skb->head;

	/* Free old data. */
	skb_release_data(skb);

	skb->head = data;
	skb->end  = data + size;

	/* Set up new pointers */
	skb->h.raw   += offset;
	skb->nh.raw  += offset;
	skb->mac.raw += offset;
	skb->tail    += offset;
	skb->data    += offset;

	/* We are no longer a clone, even if we were. */
	skb->cloned = 0;

	skb->tail    += skb->data_len;
	skb->data_len = 0;
	return 0;
}

#define HARD_TX_LOCK_BH(dev, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		spin_lock_bh(&dev->xmit_lock);		\
		dev->xmit_lock_owner = cpu;		\
	}						\
}

#define HARD_TX_UNLOCK_BH(dev) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		dev->xmit_lock_owner = -1;		\
		spin_unlock_bh(&dev->xmit_lock);	\
	}						\
}

static inline void qdisc_run(struct net_device *dev)
{
	while (!netif_queue_stopped(dev) &&
	       qdisc_restart(dev) < 0)
		/* NOTHING */;
}

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 */

int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct Qdisc *q;
	int rc = -ENOMEM;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb, GFP_ATOMIC))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb, GFP_ATOMIC))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_HW &&
	    (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
	     (!(dev->features & NETIF_F_IP_CSUM) ||
	      skb->protocol != htons(ETH_P_IP))))
		if (skb_checksum_help(&skb, 0))
			goto out_kfree_skb;

	/* Updates of qdisc are serialized by queue_lock.
	 * The struct Qdisc which is pointed to by qdisc is now a
	 * rcu structure - it may be accessed without acquiring
	 * a lock (but the structure may be stale.) The freeing of the
	 * qdisc will be deferred until it's known that there are no
	 * more references to it.
	 *
	 * If the qdisc has an enqueue function, we still need to
	 * hold the queue_lock before calling it, since queue_lock
	 * also serializes access to the device queue.
	 */
	q = dev->qdisc;
	smp_read_barrier_depends();
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		/* Grab device queue */
		spin_lock_bh(&dev->queue_lock);

		rc = q->enqueue(skb, q);

		qdisc_run(dev);

		spin_unlock_bh(&dev->queue_lock);
		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that xmit_lock protection is necessary here.
	   (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shoot the lock. It is not prone to deadlocks.
	   Either shoot the noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = get_cpu();

		if (dev->xmit_lock_owner != cpu) {

			HARD_TX_LOCK_BH(dev, cpu);

			if (!netif_queue_stopped(dev)) {

				dev_queue_xmit_nit(skb, dev);

				rc = 0;
				if (!dev->hard_start_xmit(skb, dev)) {
					HARD_TX_UNLOCK_BH(dev);
					put_cpu();
					goto out;
				}
			}
			HARD_TX_UNLOCK_BH(dev);
			put_cpu();
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			put_cpu();

			/* Recursion is detected! It is possible, unfortunately. */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
}

/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog = 300;
int weight_p = 64;            /* old backlog weight */
/* These numbers are selected based on intuition and some
 * experimentation; if you have a more scientific way of doing this
 * please go ahead and fix things.
 */
int no_cong_thresh = 10;
int no_cong = 20;
int lo_cong = 100;
int mod_cong = 290;

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };

#ifdef CONFIG_NET_HW_FLOWCONTROL
atomic_t netdev_dropping = ATOMIC_INIT(0);
static unsigned long netdev_fc_mask = 1;
unsigned long netdev_fc_xoff;
spinlock_t netdev_fc_lock = SPIN_LOCK_UNLOCKED;

static struct
{
	void (*stimul)(struct net_device *);
	struct net_device *dev;
} netdev_fc_slots[BITS_PER_LONG];

int netdev_register_fc(struct net_device *dev,
		       void (*stimul)(struct net_device *dev))
{
	int bit = 0;
	unsigned long flags;

	spin_lock_irqsave(&netdev_fc_lock, flags);
	if (netdev_fc_mask != ~0UL) {
		bit = ffz(netdev_fc_mask);
		netdev_fc_slots[bit].stimul = stimul;
		netdev_fc_slots[bit].dev = dev;
		set_bit(bit, &netdev_fc_mask);
		clear_bit(bit, &netdev_fc_xoff);
	}
	spin_unlock_irqrestore(&netdev_fc_lock, flags);
	return bit;
}

void netdev_unregister_fc(int bit)
{
	unsigned long flags;

	spin_lock_irqsave(&netdev_fc_lock, flags);
	if (bit > 0) {
		netdev_fc_slots[bit].stimul = NULL;
		netdev_fc_slots[bit].dev = NULL;
		clear_bit(bit, &netdev_fc_mask);
		clear_bit(bit, &netdev_fc_xoff);
	}
	spin_unlock_irqrestore(&netdev_fc_lock, flags);
}

static void netdev_wakeup(void)
{
	unsigned long xoff;

	spin_lock(&netdev_fc_lock);
	xoff = netdev_fc_xoff;
	netdev_fc_xoff = 0;
	while (xoff) {
		int i = ffz(~xoff);
		xoff &= ~(1 << i);
		netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
	}
	spin_unlock(&netdev_fc_lock);
}
#endif

static void get_sample_stats(int cpu)
{
#ifdef RAND_LIE
	unsigned long rd;
	int rq;
#endif
	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
	int blog = sd->input_pkt_queue.qlen;
	int avg_blog = sd->avg_blog;

	avg_blog = (avg_blog >> 1) + (blog >> 1);

	if (avg_blog > mod_cong) {
		/* Above moderate congestion levels. */
		sd->cng_level = NET_RX_CN_HIGH;
#ifdef RAND_LIE
		rd = net_random();
		rq = rd % netdev_max_backlog;
		if (rq < avg_blog) /* unlucky bastard */
			sd->cng_level = NET_RX_DROP;
#endif
	} else if (avg_blog > lo_cong) {
		sd->cng_level = NET_RX_CN_MOD;
#ifdef RAND_LIE
		rd = net_random();
		rq = rd % netdev_max_backlog;
		if (rq < avg_blog) /* unlucky bastard */
			sd->cng_level = NET_RX_CN_HIGH;
#endif
	} else if (avg_blog > no_cong)
		sd->cng_level = NET_RX_CN_LOW;
	else  /* no congestion */
		sd->cng_level = NET_RX_SUCCESS;

	sd->avg_blog = avg_blog;
}

#ifdef OFFLINE_SAMPLE
static void sample_queue(unsigned long dummy)
{
	/* 10 ms or 1 ms -- i don't care -- JHS */
	int next_tick = 1;
	int cpu = smp_processor_id();

	get_sample_stats(cpu);
	next_tick += jiffies;
	mod_timer(&samp_timer, next_tick);
}
#endif

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_CN_LOW	(low congestion)
 *	NET_RX_CN_MOD	(moderate congestion)
 *	NET_RX_CN_HIGH	(high congestion)
 *	NET_RX_DROP	(packet was dropped)
 */

int netif_rx(struct sk_buff *skb)
{
	int this_cpu;
	struct softnet_data *queue;
	unsigned long flags;

#ifdef CONFIG_NETPOLL
	if (skb->dev->netpoll_rx && netpoll_rx(skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
#endif

	if (!skb->stamp.tv_sec)
		net_timestamp(&skb->stamp);

	/*
	 * The code is rearranged so that the path is shortest
	 * when the CPU is congested, but is still operating.
	 */
	local_irq_save(flags);
	this_cpu = smp_processor_id();
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
			if (queue->throttle)
				goto drop;

enqueue:
			dev_hold(skb->dev);
			__skb_queue_tail(&queue->input_pkt_queue, skb);
#ifndef OFFLINE_SAMPLE
			get_sample_stats(this_cpu);
#endif
			local_irq_restore(flags);
			return queue->cng_level;
		}

		if (queue->throttle) {
			queue->throttle = 0;
#ifdef CONFIG_NET_HW_FLOWCONTROL
			if (atomic_dec_and_test(&netdev_dropping))
				netdev_wakeup();
#endif
		}

		netif_rx_schedule(&queue->backlog_dev);
		goto enqueue;
	}

	if (!queue->throttle) {
		queue->throttle = 1;
		__get_cpu_var(netdev_rx_stat).throttled++;
#ifdef CONFIG_NET_HW_FLOWCONTROL
		atomic_inc(&netdev_dropping);
#endif
	}

drop:
	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}

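/*
 * Illustrative sketch, not part of the original file: the tail of a
 * classic (non-NAPI) driver receive interrupt handing a frame to
 * netif_rx().  "sample_rx_frame" is a hypothetical name.
 */
#if 0
static void sample_rx_frame(struct net_device *dev, void *buf, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (!skb)
		return;				/* drop on allocation failure */
	skb_reserve(skb, 2);			/* align the IP header */
	memcpy(skb_put(skb, len), buf, len);	/* copy frame data */
	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* queue for the RX softirq */
}
#endif
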
static __inline__ void skb_bond(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (dev->master) {
		skb->real_dev = skb->dev;
		skb->dev = dev->master;
	}
}

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			BUG_TRAP(!atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct net_device *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct net_device *dev = head;
			head = head->next_sched;

			smp_mb__before_clear_bit();
			clear_bit(__LINK_STATE_SCHED, &dev->state);

			if (spin_trylock(&dev->queue_lock)) {
				qdisc_run(dev);
				spin_unlock(&dev->queue_lock);
			} else {
				netif_schedule(dev);
			}
		}
	}
}

static __inline__ int deliver_skb(struct sk_buff *skb,
				  struct packet_type *pt_prev, int last)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev);
}

#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
int (*br_handle_frame_hook)(struct sk_buff *skb);

static __inline__ int handle_bridge(struct sk_buff *skb,
				    struct packet_type *pt_prev)
{
	int ret = NET_RX_DROP;
	if (pt_prev)
		ret = deliver_skb(skb, pt_prev, 0);

	return ret;
}
#endif

static inline int __handle_bridge(struct sk_buff *skb,
				  struct packet_type **pt_prev, int *ret)
{
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
	if (skb->dev->br_port && skb->pkt_type != PACKET_LOOPBACK) {
		*ret = handle_bridge(skb, *pt_prev);
		if (br_handle_frame_hook(skb) == 0)
			return 1;

		*pt_prev = NULL;
	}
#endif
	return 0;
}

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise some useless instructions
 * (a compare and 2 stores) are executed right now if we don't have it on
 * but do have CONFIG_NET_CLS_ACT.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
int ing_filter(struct sk_buff *skb)
{
	struct Qdisc *q;
	struct net_device *dev = skb->dev;
	int result = TC_ACT_OK;

	if (dev->qdisc_ingress) {
		__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
		if (MAX_RED_LOOP < ttl++) {
			printk("Redir loop detected Dropping packet (%s->%s)\n",
			       skb->input_dev ? skb->input_dev->name : "??",
			       skb->dev->name);
			return TC_ACT_SHOT;
		}

		skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);

		skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
		if (NULL == skb->input_dev) {
			skb->input_dev = skb->dev;
			printk("ing_filter: fixed %s out %s\n",
			       skb->input_dev->name, skb->dev->name);
		}
		spin_lock(&dev->ingress_lock);
		if ((q = dev->qdisc_ingress) != NULL)
			result = q->enqueue(skb, q);
		spin_unlock(&dev->ingress_lock);
	}

	return result;
}
#endif

int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	int ret = NET_RX_DROP;
	unsigned short type;

#ifdef CONFIG_NETPOLL
	if (skb->dev->netpoll_rx && skb->dev->poll && netpoll_rx(skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
#endif

	if (!skb->stamp.tv_sec)
		net_timestamp(&skb->stamp);

	skb_bond(skb);

	__get_cpu_var(netdev_rx_stat).total++;

	skb->h.raw = skb->nh.raw = skb->data;
	skb->mac_len = skb->nh.raw - skb->mac.raw;

	pt_prev = NULL;

	rcu_read_lock();

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, 0);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	if (pt_prev) {
		atomic_inc(&skb->users);
		ret = pt_prev->func(skb, skb->dev, pt_prev);
		pt_prev = NULL; /* noone else should process this after */
	} else {
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	ret = ing_filter(skb);

	if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) {
		kfree_skb(skb);
		goto out;
	}

	skb->tc_verd = 0;
ncls:
#endif

	handle_diverter(skb);

	if (__handle_bridge(skb, &pt_prev, &ret))
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
		if (ptype->type == type &&
		    (!ptype->dev || ptype->dev == skb->dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, 0);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}

static int process_backlog(struct net_device *backlog_dev, int *budget)
{
	int work = 0;
	int quota = min(backlog_dev->quota, *budget);
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;

	for (;;) {
		struct sk_buff *skb;
		struct net_device *dev;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb)
			goto job_done;
		local_irq_enable();

		dev = skb->dev;

		netif_receive_skb(skb);

		dev_put(dev);

		work++;

		if (work >= quota || jiffies - start_time > 1)
			break;

#ifdef CONFIG_NET_HW_FLOWCONTROL
		if (queue->throttle &&
		    queue->input_pkt_queue.qlen < no_cong_thresh) {
			queue->throttle = 0;
			if (atomic_dec_and_test(&netdev_dropping))
				netdev_wakeup();
		}
#endif
	}

	backlog_dev->quota -= work;
	*budget -= work;
	return -1;

job_done:
	backlog_dev->quota -= work;
	*budget -= work;

	list_del(&backlog_dev->poll_list);
	smp_mb__before_clear_bit();
	netif_poll_enable(backlog_dev);

	if (queue->throttle) {
		queue->throttle = 0;
#ifdef CONFIG_NET_HW_FLOWCONTROL
		if (atomic_dec_and_test(&netdev_dropping))
			netdev_wakeup();
#endif
	}
	local_irq_enable();
	return 0;
}

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;
	int budget = netdev_max_backlog;

	local_irq_disable();

	while (!list_empty(&queue->poll_list)) {
		struct net_device *dev;

		if (budget <= 0 || jiffies - start_time > 1)
			goto softnet_break;

		local_irq_enable();

		dev = list_entry(queue->poll_list.next,
				 struct net_device, poll_list);

		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
			local_irq_disable();
			list_del(&dev->poll_list);
			list_add_tail(&dev->poll_list, &queue->poll_list);
			if (dev->quota < 0)
				dev->quota += dev->weight;
			else
				dev->quota = dev->weight;
		} else {
			dev_put(dev);
			local_irq_disable();
		}
	}
out:
	local_irq_enable();
	return;

softnet_break:
	__get_cpu_var(netdev_rx_stat).time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}

static gifconf_func_t * gifconf_list [NPROTO];

/**
 *	register_gifconf	-	register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}

/*
 *	Map an interface index to its name (SIOCGIFNAME)
 */

/*
 *	We need this ioctl for efficient implementation of the
 *	if_indextoname() function required by the IPv6 API.  Without
 *	it, we would have to search all the interfaces to find a
 *	matching index.
 */

static int dev_ifname(struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(ifr.ifr_ifindex);
	if (!dev) {
		read_unlock(&dev_base_lock);
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	read_unlock(&dev_base_lock);

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}

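/*
 * Illustrative sketch, not part of the original file: the userspace
 * side of SIOCGIFNAME, the kind of code if_indextoname() is built on.
 * "sample_index_to_name" is a hypothetical function.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int sample_index_to_name(int sock, int ifindex, char *name)
{
	struct ifreq ifr;

	ifr.ifr_ifindex = ifindex;
	if (ioctl(sock, SIOCGIFNAME, &ifr) < 0)
		return -1;
	strcpy(name, ifr.ifr_name);	/* name must hold IFNAMSIZ bytes */
	return 0;
}
#endif
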
/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size eventually, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 *	Loop over the interfaces, and write an info block for each.
	 */

	total = 0;
	for (dev = dev_base; dev; dev = dev->next) {
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
	}

	/*
	 *	All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 * 	Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}

#ifdef CONFIG_PROC_FS
/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */
static __inline__ struct net_device *dev_get_idx(loff_t pos)
{
	struct net_device *dev;
	loff_t i;

	for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next);

	return i == pos ? dev : NULL;
}

void *dev_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&dev_base_lock);
	return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
}

void dev_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&dev_base_lock);
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	if (dev->get_stats) {
		struct net_device_stats *stats = dev->get_stats(dev);

		seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
				"%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
			   dev->name, stats->rx_bytes, stats->rx_packets,
			   stats->rx_errors,
			   stats->rx_dropped + stats->rx_missed_errors,
			   stats->rx_fifo_errors,
			   stats->rx_length_errors + stats->rx_over_errors +
			     stats->rx_crc_errors + stats->rx_frame_errors,
			   stats->rx_compressed, stats->multicast,
			   stats->tx_bytes, stats->tx_packets,
			   stats->tx_errors, stats->tx_dropped,
			   stats->tx_fifo_errors, stats->collisions,
			   stats->tx_carrier_errors +
			     stats->tx_aborted_errors +
			     stats->tx_window_errors +
			     stats->tx_heartbeat_errors,
			   stats->tx_compressed);
	} else
		seq_printf(seq, "%6s: No statistics available.\n", dev->name);
}

/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

static struct netif_rx_stats *softnet_get_online(loff_t *pos)
{
	struct netif_rx_stats *rc = NULL;

	while (*pos < NR_CPUS)
		if (cpu_online(*pos)) {
			rc = &per_cpu(netdev_rx_stat, *pos);
			break;
		} else
			++*pos;
	return rc;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct netif_rx_stats *s = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   s->total, s->dropped, s->time_squeeze, s->throttled,
		   s->fastroute_hit, s->fastroute_success, s->fastroute_defer,
		   s->fastroute_deferred_out,
#if 0
		   s->fastroute_latency_reduction
#else
		   s->cng_level
#endif
		   );
	return 0;
}

static struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}

static struct file_operations dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = dev_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

static struct file_operations softnet_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = softnet_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

#ifdef WIRELESS_EXT
extern int wireless_proc_init(void);
#else
#define wireless_proc_init() 0
#endif

static int __init dev_proc_init(void)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
		goto out_dev;
	if (wireless_proc_init())
		goto out_softnet;
	rc = 0;
out:
	return rc;
out_softnet:
	proc_net_remove("softnet_stat");
out_dev:
	proc_net_remove("dev");
	goto out;
}
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */

/**
 *	netdev_set_master	-	set up master/slave pair
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success the reference counts
 *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
 *	function returns zero.
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;

	synchronize_net();

	if (old)
		dev_put(old);

	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}

/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 */
void dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	dev->flags |= IFF_PROMISC;
	if ((dev->promiscuity += inc) == 0)
		dev->flags &= ~IFF_PROMISC;
	if (dev->flags ^ old_flags) {
		dev_mc_upload(dev);
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
							       "left");
	}
}

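/*
 * Illustrative sketch, not part of the original file: a packet-capture
 * style user of the promiscuity counter.  Calls must balance: every +1
 * needs a matching -1, since IFF_PROMISC only clears when the count
 * reaches zero.  "sample_capture_*" are hypothetical names.
 */
#if 0
static void sample_capture_start(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, 1);	/* enter promiscuous mode */
	rtnl_unlock();
}

static void sample_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);	/* drop our reference */
	rtnl_unlock();
}
#endif
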
/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all interfaces. Once it hits zero the device reverts back to normal
 *	filtering operation. A negative @inc value is used to drop the counter
 *	when releasing a resource needing all multicasts.
 */

void dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	dev->flags |= IFF_ALLMULTI;
	if ((dev->allmulti += inc) == 0)
		dev->flags &= ~IFF_ALLMULTI;
	if (dev->flags ^ old_flags)
		dev_mc_upload(dev);
}

unsigned dev_get_flags(const struct net_device *dev)
{
	unsigned flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev) && netif_carrier_ok(dev))
		flags |= IFF_RUNNING;

	return flags;
}

int dev_change_flags(struct net_device *dev, unsigned flags)
{
	int ret;
	int old_flags = dev->flags;

	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

	dev_mc_upload(dev);

	/*
	 *	Have we downed the interface. We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);

		if (!ret)
			dev_mc_upload(dev);
	}

	if (dev->flags & IFF_UP &&
	    ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
					  IFF_VOLATILE)))
		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? +1 : -1;
		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	   is important. Some (broken) drivers set IFF_PROMISC, when
	   IFF_ALLMULTI is requested, not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

	if (old_flags ^ dev->flags)
		rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);

	return ret;
}

int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/*	MTU must be positive.	 */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
	if (dev->change_mtu)
		err = dev->change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;
	if (!err && dev->flags & IFF_UP)
		notifier_call_chain(&netdev_chain,
				    NETDEV_CHANGEMTU, dev);
	return err;
}

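/*
 * Illustrative sketch, not part of the original file: changing the MTU
 * from kernel code.  The rtnl semaphore serializes this against other
 * configuration changes.  "sample_set_mtu" is a hypothetical function.
 */
#if 0
static int sample_set_mtu(const char *name, int mtu)
{
	struct net_device *dev;
	int err = -ENODEV;

	rtnl_lock();
	dev = __dev_get_by_name(name);	/* no refcount needed under rtnl */
	if (dev)
		err = dev_set_mtu(dev, mtu);
	rtnl_unlock();
	return err;
}
#endif
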
2456 * Perform the SIOCxIFxxx calls.
2458 static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
2461 struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
2467 case SIOCGIFFLAGS: /* Get interface flags */
2468 ifr->ifr_flags = dev_get_flags(dev);
2471 case SIOCSIFFLAGS: /* Set interface flags */
2472 return dev_change_flags(dev, ifr->ifr_flags);
2474 case SIOCGIFMETRIC: /* Get the metric on the interface
2475 (currently unused) */
2476 ifr->ifr_metric = 0;
2479 case SIOCSIFMETRIC: /* Set the metric on the interface
2480 (currently unused) */
2483 case SIOCGIFMTU: /* Get the MTU of a device */
2484 ifr->ifr_mtu = dev->mtu;
2487 case SIOCSIFMTU: /* Set the MTU of a device */
2488 return dev_set_mtu(dev, ifr->ifr_mtu);
2491 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
2492 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2493 ifr->ifr_hwaddr.sa_family = dev->type;
2497 if (!dev->set_mac_address)
2499 if (ifr->ifr_hwaddr.sa_family != dev->type)
2501 if (!netif_device_present(dev))
2503 err = dev->set_mac_address(dev, &ifr->ifr_hwaddr);
2505 notifier_call_chain(&netdev_chain,
2506 NETDEV_CHANGEADDR, dev);
2509 case SIOCSIFHWBROADCAST:
2510 if (ifr->ifr_hwaddr.sa_family != dev->type)
2512 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
2513 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2514 notifier_call_chain(&netdev_chain,
2515 NETDEV_CHANGEADDR, dev);
2519 ifr->ifr_map.mem_start = dev->mem_start;
2520 ifr->ifr_map.mem_end = dev->mem_end;
2521 ifr->ifr_map.base_addr = dev->base_addr;
2522 ifr->ifr_map.irq = dev->irq;
2523 ifr->ifr_map.dma = dev->dma;
2524 ifr->ifr_map.port = dev->if_port;
2528 if (dev->set_config) {
2529 if (!netif_device_present(dev))
2531 return dev->set_config(dev, &ifr->ifr_map);
2535 case SIOCADDMULTI:
2536 if (!dev->set_multicast_list ||
2537 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2538 return -EINVAL;
2539 if (!netif_device_present(dev))
2540 return -ENODEV;
2541 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
2542 dev->addr_len, 1);
2544 case SIOCDELMULTI:
2545 if (!dev->set_multicast_list ||
2546 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2547 return -EINVAL;
2548 if (!netif_device_present(dev))
2549 return -ENODEV;
2550 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
2551 dev->addr_len, 1);
2553 case SIOCGIFINDEX:
2554 ifr->ifr_ifindex = dev->ifindex;
2555 return 0;
2557 case SIOCGIFTXQLEN:
2558 ifr->ifr_qlen = dev->tx_queue_len;
2559 return 0;
2561 case SIOCSIFTXQLEN:
2562 if (ifr->ifr_qlen < 0)
2563 return -EINVAL;
2564 dev->tx_queue_len = ifr->ifr_qlen;
2565 return 0;
2567 case SIOCSIFNAME:
2568 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
2569 return dev_change_name(dev, ifr->ifr_newname);
2572 * Unknown or private ioctl
2576 if ((cmd >= SIOCDEVPRIVATE &&
2577 cmd <= SIOCDEVPRIVATE + 15) ||
2578 cmd == SIOCBONDENSLAVE ||
2579 cmd == SIOCBONDRELEASE ||
2580 cmd == SIOCBONDSETHWADDR ||
2581 cmd == SIOCBONDSLAVEINFOQUERY ||
2582 cmd == SIOCBONDINFOQUERY ||
2583 cmd == SIOCBONDCHANGEACTIVE ||
2584 cmd == SIOCGMIIPHY ||
2585 cmd == SIOCGMIIREG ||
2586 cmd == SIOCSMIIREG ||
2587 cmd == SIOCBRADDIF ||
2588 cmd == SIOCBRDELIF ||
2589 cmd == SIOCWANDEV) {
2590 err = -EOPNOTSUPP;
2591 if (dev->do_ioctl) {
2592 if (netif_device_present(dev))
2593 err = dev->do_ioctl(dev, ifr,
2594 cmd);
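/*
 * Illustrative sketch, not part of this file: the driver side of the
 * SIOCDEVPRIVATE range dispatched above. A driver supplies dev->do_ioctl
 * and decodes the private command numbers itself; every name and command
 * meaning below is hypothetical.
 */
#if 0
static int example_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCDEVPRIVATE:		/* say, read a private counter */
		ifr->ifr_ifru.ifru_ivalue = 0;
		return 0;
	case SIOCDEVPRIVATE + 1:	/* say, set a private mode */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif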
2606 * This function handles all "interface"-type I/O control requests. The actual
2607 * 'doing' part of this is dev_ifsioc above.
2611 * dev_ioctl - network device ioctl
2612 * @cmd: command to issue
2613 * @arg: pointer to a struct ifreq in user space
2615 * Issue ioctl functions to devices. This is normally called by the
2616 * user space syscall interfaces but can sometimes be useful for
2617 * other purposes. The return value is the return from the syscall if
2618 * positive or a negative errno code on error.
2621 int dev_ioctl(unsigned int cmd, void __user *arg)
2623 struct ifreq ifr;
2624 int ret;
2625 char *colon;
2627 /* One special case: SIOCGIFCONF takes ifconf argument
2628 and requires shared lock, because it sleeps writing to user space. */
2632 if (cmd == SIOCGIFCONF) {
2633 rtnl_shlock();
2634 ret = dev_ifconf((char __user *) arg);
2635 rtnl_shunlock();
2636 return ret;
2637 }
2638 if (cmd == SIOCGIFNAME)
2639 return dev_ifname((struct ifreq __user *)arg);
2641 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2642 return -EFAULT;
2644 ifr.ifr_name[IFNAMSIZ-1] = 0;
2646 colon = strchr(ifr.ifr_name, ':');
2647 if (colon)
2648 *colon = 0;
2651 * See which interface the caller is talking about.
2656 * These ioctl calls:
2657 * - can be done by all.
2658 * - are atomic and do not require locking.
2669 dev_load(ifr.ifr_name);
2670 read_lock(&dev_base_lock);
2671 ret = dev_ifsioc(&ifr, cmd);
2672 read_unlock(&dev_base_lock);
2676 if (copy_to_user(arg, &ifr,
2677 sizeof(struct ifreq)))
2678 ret = -EFAULT;
2683 dev_load(ifr.ifr_name);
2684 rtnl_lock();
2685 ret = dev_ethtool(&ifr);
2686 rtnl_unlock();
2690 if (copy_to_user(arg, &ifr,
2691 sizeof(struct ifreq)))
2692 ret = -EFAULT;
2697 * These ioctl calls:
2698 * - require superuser power.
2699 * - require strict serialization.
2705 if (!capable(CAP_NET_ADMIN))
2706 return -EPERM;
2707 dev_load(ifr.ifr_name);
2708 rtnl_lock();
2709 ret = dev_ifsioc(&ifr, cmd);
2710 rtnl_unlock();
2714 if (copy_to_user(arg, &ifr,
2715 sizeof(struct ifreq)))
2716 ret = -EFAULT;
2721 * These ioctl calls:
2722 * - require superuser power.
2723 * - require strict serialization.
2724 * - do not return a value
2734 case SIOCSIFHWBROADCAST:
2737 case SIOCBONDENSLAVE:
2738 case SIOCBONDRELEASE:
2739 case SIOCBONDSETHWADDR:
2740 case SIOCBONDSLAVEINFOQUERY:
2741 case SIOCBONDINFOQUERY:
2742 case SIOCBONDCHANGEACTIVE:
2745 if (!capable(CAP_NET_ADMIN))
2746 return -EPERM;
2747 dev_load(ifr.ifr_name);
2748 rtnl_lock();
2749 ret = dev_ifsioc(&ifr, cmd);
2750 rtnl_unlock();
2754 /* Get the per device memory space. We can add this but
2755 * currently do not support it */
2757 /* Set the per device memory buffer space.
2758 * Not applicable in our case */
2763 * Unknown or private ioctl.
2766 if (cmd == SIOCWANDEV ||
2767 (cmd >= SIOCDEVPRIVATE &&
2768 cmd <= SIOCDEVPRIVATE + 15)) {
2769 dev_load(ifr.ifr_name);
2770 rtnl_lock();
2771 ret = dev_ifsioc(&ifr, cmd);
2772 rtnl_unlock();
2773 if (!ret && copy_to_user(arg, &ifr,
2774 sizeof(struct ifreq)))
2775 ret = -EFAULT;
2778 #ifdef WIRELESS_EXT
2779 /* Take care of Wireless Extensions */
2780 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
2781 /* If command is `set a parameter', or
2782 * `get the encoding parameters', check if
2783 * the user has the right to do it */
2784 if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) {
2785 if (!capable(CAP_NET_ADMIN))
2786 return -EPERM;
2788 dev_load(ifr.ifr_name);
2789 rtnl_lock();
2790 /* Follow me in net/core/wireless.c */
2791 ret = wireless_process_ioctl(&ifr, cmd);
2792 rtnl_unlock();
2793 if (!ret && IW_IS_GET(cmd) &&
2794 copy_to_user(arg, &ifr,
2795 sizeof(struct ifreq)))
2796 ret = -EFAULT;
2799 #endif /* WIRELESS_EXT */
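/*
 * Illustrative sketch, not part of this file: the user-space view of the
 * ioctl plumbing above. Any datagram socket serves as the handle; "eth0"
 * is only an example interface name.
 */
#if 0
/* user-space C, sketched: */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)	/* lands in dev_ifsioc() */
		printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);
	close(fd);
	return 0;
}
#endif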
2806 * dev_new_index - allocate an ifindex
2808 * Returns a suitable unique value for a new device interface
2809 * number. The caller must hold the rtnl semaphore or the
2810 * dev_base_lock to be sure it remains unique.
2812 int dev_new_index(void)
2818 if (!__dev_get_by_index(ifindex))
2819 return ifindex;
2823 static int dev_boot_phase = 1;
2825 /* Delayed registration/unregistration */
2826 static spinlock_t net_todo_list_lock = SPIN_LOCK_UNLOCKED;
2827 static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
2829 static inline void net_set_todo(struct net_device *dev)
2831 spin_lock(&net_todo_list_lock);
2832 list_add_tail(&dev->todo_list, &net_todo_list);
2833 spin_unlock(&net_todo_list_lock);
2837 * register_netdevice - register a network device
2838 * @dev: device to register
2840 * Take a completed network device structure and add it to the kernel
2841 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
2842 * chain. 0 is returned on success. A negative errno code is returned
2843 * on a failure to set up the device, or if the name is a duplicate.
2845 * Callers must hold the rtnl semaphore. See the comment at the
2846 * end of Space.c for details about the locking. You may want
2847 * register_netdev() instead of this.
2850 * The locking appears insufficient to guarantee two parallel registers
2851 * will not get the same name.
2854 int register_netdevice(struct net_device *dev)
2856 struct hlist_head *head;
2857 struct hlist_node *p;
2858 int ret;
2860 BUG_ON(dev_boot_phase);
2861 ASSERT_RTNL();
2863 /* When net_devices are persistent, this will be fatal. */
2864 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
2866 spin_lock_init(&dev->queue_lock);
2867 spin_lock_init(&dev->xmit_lock);
2868 dev->xmit_lock_owner = -1;
2869 #ifdef CONFIG_NET_CLS_ACT
2870 spin_lock_init(&dev->ingress_lock);
2871 #endif
2873 ret = alloc_divert_blk(dev);
2874 if (ret)
2875 goto out;
2877 dev->iflink = -1;
2879 /* Init, if this function is available */
2880 if (dev->init) {
2881 ret = dev->init(dev);
2889 if (!dev_valid_name(dev->name)) {
2890 ret = -EINVAL;
2891 goto out_err;
2892 }
2894 dev->ifindex = dev_new_index();
2895 if (dev->iflink == -1)
2896 dev->iflink = dev->ifindex;
2898 /* Check for existence of name */
2899 head = dev_name_hash(dev->name);
2900 hlist_for_each(p, head) {
2901 struct net_device *d
2902 = hlist_entry(p, struct net_device, name_hlist);
2903 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
2904 ret = -EEXIST;
2905 goto out_err;
2906 }
2909 /* Fix illegal SG+CSUM combinations. */
2910 if ((dev->features & NETIF_F_SG) &&
2911 !(dev->features & (NETIF_F_IP_CSUM |
2912 NETIF_F_NO_CSUM |
2913 NETIF_F_HW_CSUM))) {
2914 printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
2915 dev->name);
2916 dev->features &= ~NETIF_F_SG;
2920 * Install a nil rebuild_header routine; it should never be
2921 * called and serves only as a bug trap.
2924 if (!dev->rebuild_header)
2925 dev->rebuild_header = default_rebuild_header;
2928 * Default initial state at registration is that the
2929 * device is present.
2932 set_bit(__LINK_STATE_PRESENT, &dev->state);
2934 dev->next = NULL;
2935 dev_init_scheduler(dev);
2936 write_lock_bh(&dev_base_lock);
2937 *dev_tail = dev;
2938 dev_tail = &dev->next;
2939 hlist_add_head(&dev->name_hlist, head);
2940 hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
2941 dev_hold(dev);
2942 dev->reg_state = NETREG_REGISTERING;
2943 write_unlock_bh(&dev_base_lock);
2945 /* Notify protocols, that a new device appeared. */
2946 notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
2948 /* Finish registration after unlock */
2949 net_set_todo(dev);
2951 out:
2952 return ret;
2954 out_err:
2955 free_divert_blk(dev);
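/*
 * Illustrative sketch, not part of this file: typical driver-side use of
 * the registration path above. example_setup() and example_probe() are
 * hypothetical; most drivers use the register_netdev() wrapper, which
 * takes the RTNL semaphore around register_netdevice() as shown here.
 */
#if 0
static void example_setup(struct net_device *dev)
{
	ether_setup(dev);		/* Ethernet-style defaults */
}

static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(0, "example%d", example_setup);
	if (!dev)
		return -ENOMEM;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	if (err)
		free_netdev(dev);	/* still NETREG_UNINITIALIZED on failure */
	return err;
}
#endif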
2960 * netdev_wait_allrefs - wait until all references are gone.
2962 * This is called when unregistering network devices.
2964 * Any protocol or device that holds a reference should register
2965 * for netdevice notification, and clean up and put back the
2966 * reference if they receive an UNREGISTER event.
2967 * We can get stuck here if buggy protocols don't correctly
2968 * call dev_put.
2970 static void netdev_wait_allrefs(struct net_device *dev)
2972 unsigned long rebroadcast_time, warning_time;
2974 rebroadcast_time = warning_time = jiffies;
2975 while (atomic_read(&dev->refcnt) != 0) {
2976 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
2977 rtnl_shlock();
2979 /* Rebroadcast unregister notification */
2980 notifier_call_chain(&netdev_chain,
2981 NETDEV_UNREGISTER, dev);
2983 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
2984 &dev->state)) {
2985 /* We must not have linkwatch events
2986 * pending on unregister. If this
2987 * happens, we simply run the queue
2988 * unscheduled, resulting in a noop
2989 * for this device.
2991 linkwatch_run_queue();
2994 rtnl_shunlock();
2996 rebroadcast_time = jiffies;
2999 current->state = TASK_INTERRUPTIBLE;
3000 schedule_timeout(HZ / 4);
3002 if (time_after(jiffies, warning_time + 10 * HZ)) {
3003 printk(KERN_EMERG "unregister_netdevice: "
3004 "waiting for %s to become free. Usage "
3006 dev->name, atomic_read(&dev->refcnt));
3007 warning_time = jiffies;
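/*
 * Illustrative sketch, not part of this file: the discipline this wait
 * loop depends on. A subsystem that caches a dev_hold()'d pointer
 * registers a notifier and drops its reference on NETDEV_UNREGISTER so
 * the refcount can reach zero. All names here are hypothetical.
 */
#if 0
static struct net_device *example_cached_dev;	/* holds one reference */

static int example_event(struct notifier_block *nb, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER && dev == example_cached_dev) {
		example_cached_dev = NULL;
		dev_put(dev);		/* releases our reference */
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_event,
};
/* registered with register_netdevice_notifier(&example_notifier) */
#endif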
3016 * register_netdevice(x1);
3017 * register_netdevice(x2);
3019 * unregister_netdevice(y1);
3020 * unregister_netdevice(y2);
3026 * We are invoked by rtnl_unlock() after it drops the semaphore.
3027 * This allows us to deal with problems:
3028 * 1) We can create/delete sysfs objects which invoke hotplug
3029 * without deadlocking with linkwatch via keventd.
3030 * 2) Since we run with the RTNL semaphore not held, we can sleep
3031 * safely in order to wait for the netdev refcnt to drop to zero.
3033 static DECLARE_MUTEX(net_todo_run_mutex);
3034 void netdev_run_todo(void)
3036 struct list_head list = LIST_HEAD_INIT(list);
3037 int err;
3040 /* Need to guard against multiple CPUs getting out of order. */
3041 down(&net_todo_run_mutex);
3043 /* Not safe to do outside the semaphore. We must not return
3044 * until all unregister events invoked by the local processor
3045 * have been completed (either by this todo run, or one on
3046 * another CPU).
3048 if (list_empty(&net_todo_list))
3049 goto out;
3051 /* Snapshot list, allow later requests */
3052 spin_lock(&net_todo_list_lock);
3053 list_splice_init(&net_todo_list, &list);
3054 spin_unlock(&net_todo_list_lock);
3056 while (!list_empty(&list)) {
3057 struct net_device *dev
3058 = list_entry(list.next, struct net_device, todo_list);
3059 list_del(&dev->todo_list);
3061 switch(dev->reg_state) {
3062 case NETREG_REGISTERING:
3063 err = netdev_register_sysfs(dev);
3064 if (err)
3065 printk(KERN_ERR "%s: failed sysfs registration (%d)\n",
3066 dev->name, err);
3067 dev->reg_state = NETREG_REGISTERED;
3068 break;
3070 case NETREG_UNREGISTERING:
3071 netdev_unregister_sysfs(dev);
3072 dev->reg_state = NETREG_UNREGISTERED;
3074 netdev_wait_allrefs(dev);
3076 /* paranoia */
3077 BUG_ON(atomic_read(&dev->refcnt));
3078 BUG_TRAP(!dev->ip_ptr);
3079 BUG_TRAP(!dev->ip6_ptr);
3080 BUG_TRAP(!dev->dn_ptr);
3083 /* It must be the very last action;
3084 * after this, 'dev' may point to freed memory.
3086 if (dev->destructor)
3087 dev->destructor(dev);
3088 break;
3091 printk(KERN_ERR "network todo '%s' but state %d\n",
3092 dev->name, dev->reg_state);
3093 break;
3097 out:
3098 up(&net_todo_run_mutex);
3102 * free_netdev - free network device
3103 * @dev: device
3105 * This function does the last stage of destroying an allocated device
3106 * interface. The reference to the device object is released.
3107 * If this is the last reference then it will be freed.
3109 void free_netdev(struct net_device *dev)
3111 #ifdef CONFIG_SYSFS
3112 /* Compatibility with error handling in drivers */
3113 if (dev->reg_state == NETREG_UNINITIALIZED) {
3114 kfree((char *)dev - dev->padded);
3115 return;
3118 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
3119 dev->reg_state = NETREG_RELEASED;
3121 /* will free via class release */
3122 class_device_put(&dev->class_dev);
3123 #else
3124 kfree((char *)dev - dev->padded);
3125 #endif
3128 /* Synchronize with packet receive processing. */
3129 void synchronize_net(void)
3132 synchronize_kernel();
3136 * unregister_netdevice - remove device from the kernel
3137 * @dev: device
3139 * This function shuts down a device interface and removes it
3140 * from the kernel tables. On success 0 is returned, on a failure
3141 * a negative errno code is returned.
3143 * Callers must hold the rtnl semaphore. See the comment at the
3144 * end of Space.c for details about the locking. You may want
3145 * unregister_netdev() instead of this.
3148 int unregister_netdevice(struct net_device *dev)
3150 struct net_device *d, **dp;
3152 BUG_ON(dev_boot_phase);
3153 ASSERT_RTNL();
3155 /* Some devices call without registering for initialization unwind. */
3156 if (dev->reg_state == NETREG_UNINITIALIZED) {
3157 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3158 "was registered\n", dev->name, dev);
3162 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3164 /* If device is running, close it first. */
3165 if (dev->flags & IFF_UP)
3166 dev_close(dev);
3168 /* And unlink it from device chain. */
3169 for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
3170 if (d == dev) {
3171 write_lock_bh(&dev_base_lock);
3172 hlist_del(&dev->name_hlist);
3173 hlist_del(&dev->index_hlist);
3174 if (dev_tail == &dev->next)
3175 dev_tail = dp;
3176 *dp = d->next;
3177 write_unlock_bh(&dev_base_lock);
3182 printk(KERN_ERR "unregister net_device: '%s' not found\n",
3187 dev->reg_state = NETREG_UNREGISTERING;
3189 synchronize_net();
3191 /* Shutdown queueing discipline. */
3192 dev_shutdown(dev);
3195 /* Notify protocols that we are about to destroy
3196 this device. They should clean up all their state.
3198 notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
3201 * Flush the multicast chain
3203 dev_mc_discard(dev);
3205 if (dev->uninit)
3206 dev->uninit(dev);
3208 /* Notifier chain MUST detach us from master device. */
3209 BUG_TRAP(!dev->master);
3211 free_divert_blk(dev);
3213 /* Finish processing unregister after unlock */
3214 net_set_todo(dev);
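/*
 * Illustrative sketch, not part of this file: teardown ordering for the
 * path above. unregister_netdevice() only queues the final release on the
 * todo list; free_netdev() is safe once rtnl_unlock() has driven
 * netdev_run_todo() to completion. example_remove() is hypothetical.
 */
#if 0
static void example_remove(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();			/* runs netdev_run_todo() */
	free_netdev(dev);		/* dev is NETREG_UNREGISTERED here */
}
#endif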
3222 #ifdef CONFIG_HOTPLUG_CPU
3223 static int dev_cpu_callback(struct notifier_block *nfb,
3224 unsigned long action,
3227 struct sk_buff **list_skb;
3228 struct net_device **list_net;
3229 struct sk_buff *skb;
3230 unsigned int cpu, oldcpu = (unsigned long)ocpu;
3231 struct softnet_data *sd, *oldsd;
3233 if (action != CPU_DEAD)
3234 return NOTIFY_OK;
3236 local_irq_disable();
3237 cpu = smp_processor_id();
3238 sd = &per_cpu(softnet_data, cpu);
3239 oldsd = &per_cpu(softnet_data, oldcpu);
3241 /* Find end of our completion_queue. */
3242 list_skb = &sd->completion_queue;
3243 while (*list_skb)
3244 list_skb = &(*list_skb)->next;
3245 /* Append completion queue from offline CPU. */
3246 *list_skb = oldsd->completion_queue;
3247 oldsd->completion_queue = NULL;
3249 /* Find end of our output_queue. */
3250 list_net = &sd->output_queue;
3251 while (*list_net)
3252 list_net = &(*list_net)->next_sched;
3253 /* Append output queue from offline CPU. */
3254 *list_net = oldsd->output_queue;
3255 oldsd->output_queue = NULL;
3257 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3258 local_irq_enable();
3260 /* Process offline CPU's input_pkt_queue */
3261 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
3262 netif_rx(skb);
3266 #endif /* CONFIG_HOTPLUG_CPU */
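/*
 * Illustrative sketch, not part of this file: the pointer-to-pointer
 * idiom dev_cpu_callback() uses to splice the dead CPU's queues. Walking
 * a 'struct foo **' lets the append work unchanged whether or not the
 * destination list is empty. The types below are hypothetical.
 */
#if 0
struct example_node {
	struct example_node *next;
};

static void example_append(struct example_node **head,
			   struct example_node *chain)
{
	while (*head)			/* advance to the terminating NULL link */
		head = &(*head)->next;
	*head = chain;			/* works even when the list was empty */
}
#endif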
3270 * Initialize the DEV module. At boot time this walks the device list and
3271 * unhooks any devices that fail to initialise (normally hardware not
3272 * present) and leaves us with a valid list of present and active devices.
3277 * This is called single threaded during boot, so no need
3278 * to take the rtnl semaphore.
3280 static int __init net_dev_init(void)
3282 int i, rc = -ENOMEM;
3284 BUG_ON(!dev_boot_phase);
3286 if (dev_proc_init())
3287 goto out;
3289 if (netdev_sysfs_init())
3290 goto out;
3292 INIT_LIST_HEAD(&ptype_all);
3293 for (i = 0; i < 16; i++)
3294 INIT_LIST_HEAD(&ptype_base[i]);
3296 for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
3297 INIT_HLIST_HEAD(&dev_name_head[i]);
3299 for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
3300 INIT_HLIST_HEAD(&dev_index_head[i]);
3303 * Initialise the packet receive queues.
3306 for (i = 0; i < NR_CPUS; i++) {
3307 struct softnet_data *queue;
3309 queue = &per_cpu(softnet_data, i);
3310 skb_queue_head_init(&queue->input_pkt_queue);
3311 queue->throttle = 0;
3312 queue->cng_level = 0;
3313 queue->avg_blog = 10; /* arbitrary non-zero */
3314 queue->completion_queue = NULL;
3315 INIT_LIST_HEAD(&queue->poll_list);
3316 set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
3317 queue->backlog_dev.weight = weight_p;
3318 queue->backlog_dev.poll = process_backlog;
3319 atomic_set(&queue->backlog_dev.refcnt, 1);
3322 #ifdef OFFLINE_SAMPLE
3323 samp_timer.expires = jiffies + (10 * HZ);
3324 add_timer(&samp_timer);
3325 #endif
3327 dev_boot_phase = 0;
3329 open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
3330 open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
3332 hotcpu_notifier(dev_cpu_callback, 0);
3335 rc = 0;
3336 out:
3337 return rc;
3340 subsys_initcall(net_dev_init);
3342 EXPORT_SYMBOL(__dev_get);
3343 EXPORT_SYMBOL(__dev_get_by_flags);
3344 EXPORT_SYMBOL(__dev_get_by_index);
3345 EXPORT_SYMBOL(__dev_get_by_name);
3346 EXPORT_SYMBOL(__dev_remove_pack);
3347 EXPORT_SYMBOL(__skb_linearize);
3348 EXPORT_SYMBOL(call_netdevice_notifiers);
3349 EXPORT_SYMBOL(dev_add_pack);
3350 EXPORT_SYMBOL(dev_alloc_name);
3351 EXPORT_SYMBOL(dev_close);
3352 EXPORT_SYMBOL(dev_get_by_flags);
3353 EXPORT_SYMBOL(dev_get_by_index);
3354 EXPORT_SYMBOL(dev_get_by_name);
3355 EXPORT_SYMBOL(dev_getbyhwaddr);
3356 EXPORT_SYMBOL(dev_ioctl);
3357 EXPORT_SYMBOL(dev_new_index);
3358 EXPORT_SYMBOL(dev_open);
3359 EXPORT_SYMBOL(dev_queue_xmit);
3360 EXPORT_SYMBOL(dev_queue_xmit_nit);
3361 EXPORT_SYMBOL(dev_remove_pack);
3362 EXPORT_SYMBOL(dev_set_allmulti);
3363 EXPORT_SYMBOL(dev_set_promiscuity);
3364 EXPORT_SYMBOL(dev_change_flags);
3365 EXPORT_SYMBOL(dev_set_mtu);
3366 EXPORT_SYMBOL(free_netdev);
3367 EXPORT_SYMBOL(netdev_boot_setup_check);
3368 EXPORT_SYMBOL(netdev_set_master);
3369 EXPORT_SYMBOL(netdev_state_change);
3370 EXPORT_SYMBOL(netif_receive_skb);
3371 EXPORT_SYMBOL(netif_rx);
3372 EXPORT_SYMBOL(register_gifconf);
3373 EXPORT_SYMBOL(register_netdevice);
3374 EXPORT_SYMBOL(register_netdevice_notifier);
3375 EXPORT_SYMBOL(skb_checksum_help);
3376 EXPORT_SYMBOL(synchronize_net);
3377 EXPORT_SYMBOL(unregister_netdevice);
3378 EXPORT_SYMBOL(unregister_netdevice_notifier);
3380 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
3381 EXPORT_SYMBOL(br_handle_frame_hook);
3385 EXPORT_SYMBOL(dev_load);
3387 #ifdef CONFIG_NET_HW_FLOWCONTROL
3388 EXPORT_SYMBOL(netdev_dropping);
3389 EXPORT_SYMBOL(netdev_fc_xoff);
3390 EXPORT_SYMBOL(netdev_register_fc);
3391 EXPORT_SYMBOL(netdev_unregister_fc);
3394 #ifdef CONFIG_NET_CLS_ACT
3395 EXPORT_SYMBOL(ing_filter);
3399 EXPORT_PER_CPU_SYMBOL(softnet_data);