1 diff -Nurb linux-2.6.27.10-clickport/net/core/dev.c trellis2/net/core/dev.c
2 --- linux-2.6.27.10-clickport/net/core/dev.c 2009-01-27 16:02:45.000000000 -0500
3 +++ trellis2/net/core/dev.c 2009-02-12 11:00:46.000000000 -0500
7 for_each_netdev(net, dev) {
8 - if (!nx_dev_visible(current->nx_info, dev))
9 + if (net == &init_net && !nx_dev_visible(current->nx_info, dev))
11 for (i = 0; i < NPROTO; i++) {
12 if (gifconf_list[i]) {
14 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
16 struct net_device_stats *stats = dev->get_stats(dev);
17 + struct net *net = seq->private;
19 - if (!nx_dev_visible(current->nx_info, dev))
20 + if (net == &init_net && !nx_dev_visible(current->nx_info, dev))
23 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
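/*
 * Annotation (not part of the patch): both hunks above narrow the
 * Linux-VServer visibility check so it only hides interfaces from
 * listings generated in the initial network namespace; inside a Trellis
 * container the namespace itself already limits what for_each_netdev()
 * can return. A minimal sketch of the shared guard, with a hypothetical
 * helper name and nx_dev_visible() as provided by the VServer patch:
 */
static inline int trellis_dev_hidden(struct net *net, struct net_device *dev)
{
        /* hide only in init_net; other namespaces are isolated already */
        return net == &init_net && !nx_dev_visible(current->nx_info, dev);
}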
24 diff -Nurb linux-2.6.27.10-clickport/net/core/dev.c.orig trellis2/net/core/dev.c.orig
25 --- linux-2.6.27.10-clickport/net/core/dev.c.orig 1969-12-31 19:00:00.000000000 -0500
26 +++ trellis2/net/core/dev.c.orig 2009-01-27 16:02:45.000000000 -0500
29 + * NET3 Protocol independent device support routines.
31 + * This program is free software; you can redistribute it and/or
32 + * modify it under the terms of the GNU General Public License
33 + * as published by the Free Software Foundation; either version
34 + * 2 of the License, or (at your option) any later version.
36 + * Derived from the non IP parts of dev.c 1.0.19
37 + * Authors: Ross Biro
38 + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
39 + * Mark Evans, <evansmp@uhura.aston.ac.uk>
41 + * Additional Authors:
42 + * Florian la Roche <rzsfl@rz.uni-sb.de>
43 + * Alan Cox <gw4pts@gw4pts.ampr.org>
44 + * David Hinds <dahinds@users.sourceforge.net>
45 + * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
46 + * Adam Sulmicki <adam@cfar.umd.edu>
47 + * Pekka Riikonen <priikone@poesidon.pspt.fi>
50 + * D.J. Barrow : Fixed bug where dev->refcnt gets set
51 + * to 2 if register_netdev gets called
52 + * before net_dev_init & also removed a
53 + * few lines of code in the process.
54 + * Alan Cox : device private ioctl copies fields back.
55 + * Alan Cox : Transmit queue code does relevant
56 + * stunts to keep the queue safe.
57 + * Alan Cox : Fixed double lock.
58 + * Alan Cox : Fixed promisc NULL pointer trap
59 + * ???????? : Support the full private ioctl range
60 + * Alan Cox : Moved ioctl permission check into
62 + * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
63 + * Alan Cox : 100 backlog just doesn't cut it when
64 + * you start doing multicast video 8)
65 + * Alan Cox : Rewrote net_bh and list manager.
66 + * Alan Cox : Fix ETH_P_ALL echoback lengths.
67 + * Alan Cox : Took out transmit every packet pass
68 + * Saved a few bytes in the ioctl handler
69 + * Alan Cox : Network driver sets packet type before
70 + * calling netif_rx. Saves a function
72 + * Alan Cox : Hashed net_bh()
73 + * Richard Kooijman: Timestamp fixes.
74 + * Alan Cox : Wrong field in SIOCGIFDSTADDR
75 + * Alan Cox : Device lock protection.
76 + * Alan Cox : Fixed nasty side effect of device close
78 + * Rudi Cilibrasi : Pass the right thing to
80 + * Dave Miller : 32bit quantity for the device lock to
81 + * make it work out on a Sparc.
82 + * Bjorn Ekwall : Added KERNELD hack.
83 + * Alan Cox : Cleaned up the backlog initialise.
84 + * Craig Metz : SIOCGIFCONF fix if space for under
86 + * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
87 + * is no device open function.
88 + * Andi Kleen : Fix error reporting for SIOCGIFCONF
89 + * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
90 + * Cyrus Durgin : Cleaned for KMOD
91 + * Adam Sulmicki : Bug Fix : Network Device Unload
92 + * A network device unload needs to purge
93 + * the backlog queue.
94 + * Paul Rusty Russell : SIOCSIFNAME
95 + * Pekka Riikonen : Netdev boot-time settings code
96 + * Andrew Morton : Make unregister_netdevice wait
97 + * indefinitely on dev->refcnt
98 + * J Hadi Salim : - Backlog queue sampling
99 + * - netif_rx() feedback
102 +#include <asm/uaccess.h>
103 +#include <asm/system.h>
104 +#include <linux/bitops.h>
105 +#include <linux/capability.h>
106 +#include <linux/cpu.h>
107 +#include <linux/types.h>
108 +#include <linux/kernel.h>
109 +#include <linux/sched.h>
110 +#include <linux/mutex.h>
111 +#include <linux/string.h>
112 +#include <linux/mm.h>
113 +#include <linux/socket.h>
114 +#include <linux/sockios.h>
115 +#include <linux/errno.h>
116 +#include <linux/interrupt.h>
117 +#include <linux/if_ether.h>
118 +#include <linux/netdevice.h>
119 +#include <linux/etherdevice.h>
120 +#include <linux/ethtool.h>
121 +#include <linux/notifier.h>
122 +#include <linux/skbuff.h>
123 +#include <net/net_namespace.h>
124 +#include <net/sock.h>
125 +#include <linux/rtnetlink.h>
126 +#include <linux/proc_fs.h>
127 +#include <linux/seq_file.h>
128 +#include <linux/stat.h>
129 +#include <linux/if_bridge.h>
130 +#include <linux/if_macvlan.h>
131 +#include <net/dst.h>
132 +#include <net/pkt_sched.h>
133 +#include <net/checksum.h>
134 +#include <linux/highmem.h>
135 +#include <linux/init.h>
136 +#include <linux/kmod.h>
137 +#include <linux/module.h>
138 +#include <linux/kallsyms.h>
139 +#include <linux/netpoll.h>
140 +#include <linux/rcupdate.h>
141 +#include <linux/delay.h>
142 +#include <net/wext.h>
143 +#include <net/iw_handler.h>
144 +#include <asm/current.h>
145 +#include <linux/audit.h>
146 +#include <linux/dmaengine.h>
147 +#include <linux/err.h>
148 +#include <linux/ctype.h>
149 +#include <linux/if_arp.h>
150 +#include <linux/if_vlan.h>
151 +#include <linux/ip.h>
153 +#include <linux/ipv6.h>
154 +#include <linux/in.h>
155 +#include <linux/jhash.h>
156 +#include <linux/random.h>
157 +#include <linux/vs_inet.h>
159 +#include "net-sysfs.h"
162 + * The list of packet types we will receive (as opposed to discard)
163 + * and the routines to invoke.
165 + * Why 16. Because with 16 the only overlap we get on a hash of the
166 + * low nibble of the protocol value is RARP/SNAP/X.25.
168 + * NOTE: That is no longer true with the addition of VLAN tags. Not
169 + * sure which should go first, but I bet it won't make much
170 + * difference if we are running VLANs. The good news is that
171 + * this protocol won't be in the list unless compiled in, so
172 + * the average user (w/out VLANs) will not be adversely affected.
189 +#define PTYPE_HASH_SIZE (16)
190 +#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
192 +static DEFINE_SPINLOCK(ptype_lock);
193 +static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
194 +static struct list_head ptype_all __read_mostly; /* Taps */
196 +#ifdef CONFIG_NET_DMA
198 + struct dma_client client;
200 + cpumask_t channel_mask;
201 + struct dma_chan **channels;
204 +static enum dma_state_client
205 +netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
206 + enum dma_state state);
208 +static struct net_dma net_dma = {
210 + .event_callback = netdev_dma_event,
216 + * The @dev_base_head list is protected by @dev_base_lock and the rtnl
219 + * Pure readers hold dev_base_lock for reading.
221 + * Writers must hold the rtnl semaphore while they loop through the
222 + * dev_base_head list, and hold dev_base_lock for writing when they do the
223 + * actual updates. This allows pure readers to access the list even
224 + * while a writer is preparing to update it.
226 + * To put it another way, dev_base_lock is held for writing only to
227 + * protect against pure readers; the rtnl semaphore provides the
228 + * protection against other writers.
230 + * See, for example usages, register_netdevice() and
231 + * unregister_netdevice(), which must be called with the rtnl
234 +DEFINE_RWLOCK(dev_base_lock);
236 +EXPORT_SYMBOL(dev_base_lock);
238 +#define NETDEV_HASHBITS 8
239 +#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
241 +static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
243 + unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
244 + return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
247 +static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
249 + return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
252 +/* Device list insertion */
253 +static int list_netdevice(struct net_device *dev)
255 + struct net *net = dev_net(dev);
259 + write_lock_bh(&dev_base_lock);
260 + list_add_tail(&dev->dev_list, &net->dev_base_head);
261 + hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
262 + hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
263 + write_unlock_bh(&dev_base_lock);
267 +/* Device list removal */
268 +static void unlist_netdevice(struct net_device *dev)
272 + /* Unlink dev from the device chain */
273 + write_lock_bh(&dev_base_lock);
274 + list_del(&dev->dev_list);
275 + hlist_del(&dev->name_hlist);
276 + hlist_del(&dev->index_hlist);
277 + write_unlock_bh(&dev_base_lock);
280 +/* Click: input packet handlers, might steal packets from net_rx_action. */
281 +static RAW_NOTIFIER_HEAD(net_in_chain);
284 + * Our notifier list
287 +static RAW_NOTIFIER_HEAD(netdev_chain);
290 + * Device drivers call our routines to queue packets here. We empty the
291 + * queue in the local softnet handler.
294 +DEFINE_PER_CPU(struct softnet_data, softnet_data);
296 +#ifdef CONFIG_LOCKDEP
298 + * register_netdevice() inits txq->_xmit_lock and sets lockdep class
299 + * according to dev->type
301 +static const unsigned short netdev_lock_type[] =
302 + {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
303 + ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
304 + ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
305 + ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
306 + ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
307 + ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
308 + ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
309 + ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
310 + ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
311 + ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
312 + ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
313 + ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
314 + ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
315 + ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
318 +static const char *netdev_lock_name[] =
319 + {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
320 + "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
321 + "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
322 + "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
323 + "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
324 + "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
325 + "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
326 + "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
327 + "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
328 + "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
329 + "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
330 + "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
331 + "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
332 + "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
335 +static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
336 +static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
338 +static inline unsigned short netdev_lock_pos(unsigned short dev_type)
342 + for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
343 + if (netdev_lock_type[i] == dev_type)
345 + /* the last key is used by default */
346 + return ARRAY_SIZE(netdev_lock_type) - 1;
349 +static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
350 + unsigned short dev_type)
354 + i = netdev_lock_pos(dev_type);
355 + lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
356 + netdev_lock_name[i]);
359 +static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
363 + i = netdev_lock_pos(dev->type);
364 + lockdep_set_class_and_name(&dev->addr_list_lock,
365 + &netdev_addr_lock_key[i],
366 + netdev_lock_name[i]);
369 +static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
370 + unsigned short dev_type)
373 +static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
378 +/*******************************************************************************
380 + Protocol management and registration routines
382 +*******************************************************************************/
385 + * Add a protocol ID to the list. Now that the input handler is
386 + * smarter we can dispense with all the messy stuff that used to be
389 + * BEWARE!!! Protocol handlers, mangling input packets,
390 + * MUST BE last in hash buckets and checking protocol handlers
391 + * MUST start from promiscuous ptype_all chain in net_bh.
392 + * It is true now, do not change it.
393 + * Explanation follows: if protocol handler, mangling packet, will
394 + * be the first on list, it is not able to sense, that packet
395 + * is cloned and should be copied-on-write, so that it will
396 + * change it and subsequent readers will get broken packet.
401 + * dev_add_pack - add packet handler
402 + * @pt: packet type declaration
404 + * Add a protocol handler to the networking stack. The passed &packet_type
405 + * is linked into kernel lists and may not be freed until it has been
406 + * removed from the kernel lists.
408 + * This call does not sleep therefore it can not
409 + * guarantee all CPU's that are in middle of receiving packets
410 + * will see the new packet type (until the next received packet).
413 +void dev_add_pack(struct packet_type *pt)
417 + spin_lock_bh(&ptype_lock);
418 + if (pt->type == htons(ETH_P_ALL))
419 + list_add_rcu(&pt->list, &ptype_all);
421 + hash = ntohs(pt->type) & PTYPE_HASH_MASK;
422 + list_add_rcu(&pt->list, &ptype_base[hash]);
424 + spin_unlock_bh(&ptype_lock);
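/*
 * Illustrative use of dev_add_pack() (not part of the patch); the handler
 * name and the local-experimental EtherType 0x88B5 are assumptions:
 */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev)
{
        /* a handler must consume or hand off the skb */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type example_pt = {
        .type = __constant_htons(0x88B5),
        .func = example_rcv,
};

/* dev_add_pack(&example_pt); ... later: dev_remove_pack(&example_pt); */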
428 + * __dev_remove_pack - remove packet handler
429 + * @pt: packet type declaration
431 + * Remove a protocol handler that was previously added to the kernel
432 + * protocol handlers by dev_add_pack(). The passed &packet_type is removed
433 + * from the kernel lists and can be freed or reused once this function
436 + * The packet type might still be in use by receivers
437 + * and must not be freed until after all the CPUs have gone
438 + * through a quiescent state.
440 +void __dev_remove_pack(struct packet_type *pt)
442 + struct list_head *head;
443 + struct packet_type *pt1;
445 + spin_lock_bh(&ptype_lock);
447 + if (pt->type == htons(ETH_P_ALL))
450 + head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
452 + list_for_each_entry(pt1, head, list) {
454 + list_del_rcu(&pt->list);
459 + printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
461 + spin_unlock_bh(&ptype_lock);
464 + * dev_remove_pack - remove packet handler
465 + * @pt: packet type declaration
467 + * Remove a protocol handler that was previously added to the kernel
468 + * protocol handlers by dev_add_pack(). The passed &packet_type is removed
469 + * from the kernel lists and can be freed or reused once this function
472 + * This call sleeps to guarantee that no CPU is looking at the packet
473 + * type after return.
475 +void dev_remove_pack(struct packet_type *pt)
477 + __dev_remove_pack(pt);
482 +/******************************************************************************
484 + Device Boot-time Settings Routines
486 +*******************************************************************************/
488 +/* Boot time configuration table */
489 +static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
492 + * netdev_boot_setup_add - add new setup entry
493 + * @name: name of the device
494 + * @map: configured settings for the device
496 + * Adds new setup entry to the dev_boot_setup list. The function
497 + * returns 0 on error and 1 on success. This is a generic routine to
500 +static int netdev_boot_setup_add(char *name, struct ifmap *map)
502 + struct netdev_boot_setup *s;
505 + s = dev_boot_setup;
506 + for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
507 + if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
508 + memset(s[i].name, 0, sizeof(s[i].name));
509 + strlcpy(s[i].name, name, IFNAMSIZ);
510 + memcpy(&s[i].map, map, sizeof(s[i].map));
515 + return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
519 + * netdev_boot_setup_check - check boot time settings
520 + * @dev: the netdevice
522 + * Check boot time settings for the device.
523 + * The found settings are set for the device to be used
524 + * later in the device probing.
525 + * Returns 0 if no settings found, 1 if they are.
527 +int netdev_boot_setup_check(struct net_device *dev)
529 + struct netdev_boot_setup *s = dev_boot_setup;
532 + for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
533 + if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
534 + !strcmp(dev->name, s[i].name)) {
535 + dev->irq = s[i].map.irq;
536 + dev->base_addr = s[i].map.base_addr;
537 + dev->mem_start = s[i].map.mem_start;
538 + dev->mem_end = s[i].map.mem_end;
547 + * netdev_boot_base - get address from boot time settings
548 + * @prefix: prefix for network device
549 + * @unit: id for network device
551 + * Check boot time settings for the base address of device.
552 + * The found settings are set for the device to be used
553 + * later in the device probing.
554 + * Returns 0 if no settings found.
556 +unsigned long netdev_boot_base(const char *prefix, int unit)
558 + const struct netdev_boot_setup *s = dev_boot_setup;
559 + char name[IFNAMSIZ];
562 + sprintf(name, "%s%d", prefix, unit);
565 + * If device already registered then return base of 1
566 + * to indicate not to probe for this interface
568 + if (__dev_get_by_name(&init_net, name))
571 + for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
572 + if (!strcmp(name, s[i].name))
573 + return s[i].map.base_addr;
578 + * Saves at boot time configured settings for any netdevice.
580 +int __init netdev_boot_setup(char *str)
585 + str = get_options(str, ARRAY_SIZE(ints), ints);
589 + /* Save settings */
590 + memset(&map, 0, sizeof(map));
594 + map.base_addr = ints[2];
596 + map.mem_start = ints[3];
598 + map.mem_end = ints[4];
600 + /* Add new entry to the list */
601 + return netdev_boot_setup_add(str, &map);
604 +__setup("netdev=", netdev_boot_setup);
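/*
 * Illustrative boot-time use (not part of the patch): the command line
 *
 *      netdev=9,0x300,0,0,eth0
 *
 * is parsed by netdev_boot_setup() above into irq=9 and base_addr=0x300
 * for the name "eth0"; a later netdev_boot_setup_check() during probing
 * copies those values into the matching net_device.
 */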
606 +/*******************************************************************************
608 + Device Interface Subroutines
610 +*******************************************************************************/
613 + * __dev_get_by_name - find a device by its name
614 + * @net: the applicable net namespace
615 + * @name: name to find
617 + * Find an interface by name. Must be called under RTNL semaphore
618 + * or @dev_base_lock. If the name is found a pointer to the device
619 + * is returned. If the name is not found then %NULL is returned. The
620 + * reference counters are not incremented so the caller must be
621 + * careful with locks.
624 +struct net_device *__dev_get_by_name(struct net *net, const char *name)
626 + struct hlist_node *p;
628 + hlist_for_each(p, dev_name_hash(net, name)) {
629 + struct net_device *dev
630 + = hlist_entry(p, struct net_device, name_hlist);
631 + if (!strncmp(dev->name, name, IFNAMSIZ))
638 + * dev_get_by_name - find a device by its name
639 + * @net: the applicable net namespace
640 + * @name: name to find
642 + * Find an interface by name. This can be called from any
643 + * context and does its own locking. The returned handle has
644 + * the usage count incremented and the caller must use dev_put() to
645 + * release it when it is no longer needed. %NULL is returned if no
646 + * matching device is found.
649 +struct net_device *dev_get_by_name(struct net *net, const char *name)
651 + struct net_device *dev;
653 + read_lock(&dev_base_lock);
654 + dev = __dev_get_by_name(net, name);
657 + read_unlock(&dev_base_lock);
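/*
 * Illustrative lookup (not part of the patch): take and drop the
 * reference that dev_get_by_name() returns.
 */
static void example_lookup(void)
{
        struct net_device *d = dev_get_by_name(&init_net, "eth0");

        if (d) {
                /* ... use d; the reference keeps it from being freed ... */
                dev_put(d);
        }
}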
662 + * __dev_get_by_index - find a device by its ifindex
663 + * @net: the applicable net namespace
664 + * @ifindex: index of device
666 + * Search for an interface by index. Returns %NULL if the device
667 + * is not found or a pointer to the device. The device has not
668 + * had its reference counter increased so the caller must be careful
669 + * about locking. The caller must hold either the RTNL semaphore
670 + * or @dev_base_lock.
673 +struct net_device *__dev_get_by_index(struct net *net, int ifindex)
675 + struct hlist_node *p;
677 + hlist_for_each(p, dev_index_hash(net, ifindex)) {
678 + struct net_device *dev
679 + = hlist_entry(p, struct net_device, index_hlist);
680 + if (dev->ifindex == ifindex)
688 + * dev_get_by_index - find a device by its ifindex
689 + * @net: the applicable net namespace
690 + * @ifindex: index of device
692 + * Search for an interface by index. Returns NULL if the device
693 + * is not found or a pointer to the device. The device returned has
694 + * had a reference added and the pointer is safe until the user calls
695 + * dev_put to indicate they have finished with it.
698 +struct net_device *dev_get_by_index(struct net *net, int ifindex)
700 + struct net_device *dev;
702 + read_lock(&dev_base_lock);
703 + dev = __dev_get_by_index(net, ifindex);
706 + read_unlock(&dev_base_lock);
711 + * dev_getbyhwaddr - find a device by its hardware address
712 + * @net: the applicable net namespace
713 + * @type: media type of device
714 + * @ha: hardware address
716 + * Search for an interface by MAC address. Returns NULL if the device
717 + * is not found or a pointer to the device. The caller must hold the
718 + * rtnl semaphore. The returned device has not had its ref count increased
719 + * and the caller must therefore be careful about locking
722 + * If the API was consistent this would be __dev_get_by_hwaddr
725 +struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
727 + struct net_device *dev;
731 + for_each_netdev(net, dev)
732 + if (dev->type == type &&
733 + !memcmp(dev->dev_addr, ha, dev->addr_len))
739 +EXPORT_SYMBOL(dev_getbyhwaddr);
741 +struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
743 + struct net_device *dev;
746 + for_each_netdev(net, dev)
747 + if (dev->type == type)
753 +EXPORT_SYMBOL(__dev_getfirstbyhwtype);
755 +struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
757 + struct net_device *dev;
760 + dev = __dev_getfirstbyhwtype(net, type);
767 +EXPORT_SYMBOL(dev_getfirstbyhwtype);
770 + * dev_get_by_flags - find any device with given flags
771 + * @net: the applicable net namespace
772 + * @if_flags: IFF_* values
773 + * @mask: bitmask of bits in if_flags to check
775 + * Search for any interface with the given flags. Returns NULL if a device
776 + * is not found or a pointer to the device. The device returned has
777 + * had a reference added and the pointer is safe until the user calls
778 + * dev_put to indicate they have finished with it.
781 +struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
783 + struct net_device *dev, *ret;
786 + read_lock(&dev_base_lock);
787 + for_each_netdev(net, dev) {
788 + if (((dev->flags ^ if_flags) & mask) == 0) {
794 + read_unlock(&dev_base_lock);
799 + * dev_valid_name - check if name is okay for network device
800 + * @name: name string
802 + * Network device names need to be valid file names to
803 + * allow sysfs to work. We also disallow any kind of
806 +int dev_valid_name(const char *name)
810 + if (strlen(name) >= IFNAMSIZ)
812 + if (!strcmp(name, ".") || !strcmp(name, ".."))
816 + if (*name == '/' || isspace(*name))
824 + * __dev_alloc_name - allocate a name for a device
825 + * @net: network namespace to allocate the device name in
826 + * @name: name format string
827 + * @buf: scratch buffer and result name string
829 + * Passed a format string - eg "lt%d" it will try and find a suitable
830 + * id. It scans list of devices to build up a free map, then chooses
831 + * the first empty slot. The caller must hold the dev_base or rtnl lock
832 + * while allocating the name and adding the device in order to avoid
834 + * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
835 + * Returns the number of the unit assigned or a negative errno code.
838 +static int __dev_alloc_name(struct net *net, const char *name, char *buf)
842 + const int max_netdevices = 8*PAGE_SIZE;
843 + unsigned long *inuse;
844 + struct net_device *d;
846 + p = strnchr(name, IFNAMSIZ-1, '%');
849 + * Verify the string as this thing may have come from
850 + * the user. There must be either one "%d" and no other "%"
853 + if (p[1] != 'd' || strchr(p + 2, '%'))
856 + /* Use one page as a bit array of possible slots */
857 + inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
861 + for_each_netdev(net, d) {
862 + if (!sscanf(d->name, name, &i))
864 + if (i < 0 || i >= max_netdevices)
867 + /* avoid cases where sscanf is not exact inverse of printf */
868 + snprintf(buf, IFNAMSIZ, name, i);
869 + if (!strncmp(buf, d->name, IFNAMSIZ))
873 + i = find_first_zero_bit(inuse, max_netdevices);
874 + free_page((unsigned long) inuse);
877 + snprintf(buf, IFNAMSIZ, name, i);
878 + if (!__dev_get_by_name(net, buf))
881 + /* It is possible to run out of possible slots
882 + * when the name is long and there isn't enough space left
883 + * for the digits, or if all bits are used.
889 + * dev_alloc_name - allocate a name for a device
891 + * @name: name format string
893 + * Passed a format string - eg "lt%d" it will try and find a suitable
894 + * id. It scans list of devices to build up a free map, then chooses
895 + * the first empty slot. The caller must hold the dev_base or rtnl lock
896 + * while allocating the name and adding the device in order to avoid
898 + * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
899 + * Returns the number of the unit assigned or a negative errno code.
902 +int dev_alloc_name(struct net_device *dev, const char *name)
904 + char buf[IFNAMSIZ];
908 + BUG_ON(!dev_net(dev));
909 + net = dev_net(dev);
910 + ret = __dev_alloc_name(net, name, buf);
912 + strlcpy(dev->name, buf, IFNAMSIZ);
918 + * dev_change_name - change name of a device
920 + * @newname: name (or format string) must be at least IFNAMSIZ
922 + * Change name of a device, can pass format strings "eth%d".
925 +int dev_change_name(struct net_device *dev, char *newname)
927 + char oldname[IFNAMSIZ];
933 + BUG_ON(!dev_net(dev));
935 + net = dev_net(dev);
936 + if (dev->flags & IFF_UP)
939 + if (!dev_valid_name(newname))
942 + if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
945 + memcpy(oldname, dev->name, IFNAMSIZ);
947 + if (strchr(newname, '%')) {
948 + err = dev_alloc_name(dev, newname);
951 + strcpy(newname, dev->name);
953 + else if (__dev_get_by_name(net, newname))
956 + strlcpy(dev->name, newname, IFNAMSIZ);
959 + /* For now only devices in the initial network namespace
962 + if (net == &init_net) {
963 + ret = device_rename(&dev->dev, dev->name);
965 + memcpy(dev->name, oldname, IFNAMSIZ);
970 + write_lock_bh(&dev_base_lock);
971 + hlist_del(&dev->name_hlist);
972 + hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
973 + write_unlock_bh(&dev_base_lock);
975 + ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
976 + ret = notifier_to_errno(ret);
981 + "%s: name change rollback failed: %d.\n",
985 + memcpy(dev->name, oldname, IFNAMSIZ);
994 + * netdev_features_change - device changes features
995 + * @dev: device to cause notification
997 + * Called to indicate a device has changed features.
999 +void netdev_features_change(struct net_device *dev)
1001 + call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1003 +EXPORT_SYMBOL(netdev_features_change);
1006 + * netdev_state_change - device changes state
1007 + * @dev: device to cause notification
1009 + * Called to indicate a device has changed state. This function calls
1010 + * the notifier chains for netdev_chain and sends a NEWLINK message
1011 + * to the routing socket.
1013 +void netdev_state_change(struct net_device *dev)
1015 + if (dev->flags & IFF_UP) {
1016 + call_netdevice_notifiers(NETDEV_CHANGE, dev);
1017 + rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1021 +void netdev_bonding_change(struct net_device *dev)
1023 + call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
1025 +EXPORT_SYMBOL(netdev_bonding_change);
1028 + * dev_load - load a network module
1029 + * @net: the applicable net namespace
1030 + * @name: name of interface
1032 + * If a network interface is not present and the process has suitable
1033 + * privileges this function loads the module. If module loading is not
1034 + * available in this kernel then it becomes a nop.
1037 +void dev_load(struct net *net, const char *name)
1039 + struct net_device *dev;
1041 + read_lock(&dev_base_lock);
1042 + dev = __dev_get_by_name(net, name);
1043 + read_unlock(&dev_base_lock);
1045 + if (!dev && capable(CAP_SYS_MODULE))
1046 + request_module("%s", name);
1050 + * dev_open - prepare an interface for use.
1051 + * @dev: device to open
1053 + * Takes a device from down to up state. The device's private open
1054 + * function is invoked and then the multicast lists are loaded. Finally
1055 + * the device is moved into the up state and a %NETDEV_UP message is
1056 + * sent to the netdev notifier chain.
1058 + * Calling this function on an active interface is a nop. On a failure
1059 + * a negative errno code is returned.
1061 +int dev_open(struct net_device *dev)
1068 + * Is it already up?
1071 + if (dev->flags & IFF_UP)
1075 + * Is it even present?
1077 + if (!netif_device_present(dev))
1081 + * Call device private open method
1083 + set_bit(__LINK_STATE_START, &dev->state);
1085 + if (dev->validate_addr)
1086 + ret = dev->validate_addr(dev);
1088 + if (!ret && dev->open)
1089 + ret = dev->open(dev);
1092 + * If it went open OK then:
1096 + clear_bit(__LINK_STATE_START, &dev->state);
1101 + dev->flags |= IFF_UP;
1104 + * Initialize multicasting status
1106 + dev_set_rx_mode(dev);
1109 + * Wakeup transmit queue engine
1111 + dev_activate(dev);
1114 + * ... and announce new interface.
1116 + call_netdevice_notifiers(NETDEV_UP, dev);
1123 + * dev_close - shutdown an interface.
1124 + * @dev: device to shutdown
1126 + * This function moves an active device into down state. A
1127 + * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1128 + * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1131 +int dev_close(struct net_device *dev)
1137 + if (!(dev->flags & IFF_UP))
1141 + * Tell people we are going down, so that they can
1142 + * prepare for death while the device is still operating.
1144 + call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1146 + clear_bit(__LINK_STATE_START, &dev->state);
1148 + /* Synchronize to scheduled poll. We cannot touch poll list,
1149 + * it can be even on different cpu. So just clear netif_running().
1151 + * dev->stop() will invoke napi_disable() on all of its
1152 + * napi_struct instances on this device.
1154 + smp_mb__after_clear_bit(); /* Commit netif_running(). */
1156 + dev_deactivate(dev);
1159 + * Call the device specific close. This cannot fail.
1160 + * Only if device is UP
1162 + * We allow it to be called even after a DETACH hot-plug
1169 + * Device is now down.
1172 + dev->flags &= ~IFF_UP;
1175 + * Tell people we are down
1177 + call_netdevice_notifiers(NETDEV_DOWN, dev);
1184 + * dev_disable_lro - disable Large Receive Offload on a device
1187 + * Disable Large Receive Offload (LRO) on a net device. Must be
1188 + * called under RTNL. This is needed if received packets may be
1189 + * forwarded to another interface.
1191 +void dev_disable_lro(struct net_device *dev)
1193 + if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
1194 + dev->ethtool_ops->set_flags) {
1195 + u32 flags = dev->ethtool_ops->get_flags(dev);
1196 + if (flags & ETH_FLAG_LRO) {
1197 + flags &= ~ETH_FLAG_LRO;
1198 + dev->ethtool_ops->set_flags(dev, flags);
1201 + WARN_ON(dev->features & NETIF_F_LRO);
1203 +EXPORT_SYMBOL(dev_disable_lro);
1206 +static int dev_boot_phase = 1;
1209 + * Device change register/unregister. These are not inline or static
1210 + * as we export them to the world.
1214 + * register_netdevice_notifier - register a network notifier block
1217 + * Register a notifier to be called when network device events occur.
1218 + * The notifier passed is linked into the kernel structures and must
1219 + * not be reused until it has been unregistered. A negative errno code
1220 + * is returned on a failure.
1222 + * When registered all registration and up events are replayed
1223 + * to the new notifier to allow device to have a race free
1224 + * view of the network device list.
1227 +int register_netdevice_notifier(struct notifier_block *nb)
1229 + struct net_device *dev;
1230 + struct net_device *last;
1235 + err = raw_notifier_chain_register(&netdev_chain, nb);
1238 + if (dev_boot_phase)
1240 + for_each_net(net) {
1241 + for_each_netdev(net, dev) {
1242 + err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1243 + err = notifier_to_errno(err);
1247 + if (!(dev->flags & IFF_UP))
1250 + nb->notifier_call(nb, NETDEV_UP, dev);
1260 + for_each_net(net) {
1261 + for_each_netdev(net, dev) {
1265 + if (dev->flags & IFF_UP) {
1266 + nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1267 + nb->notifier_call(nb, NETDEV_DOWN, dev);
1269 + nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1273 + raw_notifier_chain_unregister(&netdev_chain, nb);
1278 + * unregister_netdevice_notifier - unregister a network notifier block
1281 + * Unregister a notifier previously registered by
1282 + * register_netdevice_notifier(). The notifier is unlinked from the
1283 + * kernel structures and may then be reused. A negative errno code
1284 + * is returned on a failure.
1287 +int unregister_netdevice_notifier(struct notifier_block *nb)
1292 + err = raw_notifier_chain_unregister(&netdev_chain, nb);
1298 + * call_netdevice_notifiers - call all network notifier blocks
1299 + * @val: value passed unmodified to notifier function
1300 + * @dev: net_device pointer passed unmodified to notifier function
1302 + * Call all network notifier blocks. Parameters and return value
1303 + * are as for raw_notifier_call_chain().
1306 +int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1308 + return raw_notifier_call_chain(&netdev_chain, val, dev);
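/*
 * Illustrative notifier block (not part of the patch); the names are
 * assumptions. As described above, registration replays NETDEV_REGISTER
 * and NETDEV_UP for devices that already exist.
 */
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (event == NETDEV_UP)
                printk(KERN_INFO "%s is up\n", dev->name);
        return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
        .notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_nb); */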
1311 +/* When > 0 there are consumers of rx skb time stamps */
1312 +static atomic_t netstamp_needed = ATOMIC_INIT(0);
1314 +void net_enable_timestamp(void)
1316 + atomic_inc(&netstamp_needed);
1319 +void net_disable_timestamp(void)
1321 + atomic_dec(&netstamp_needed);
1324 +static inline void net_timestamp(struct sk_buff *skb)
1326 + if (atomic_read(&netstamp_needed))
1327 + __net_timestamp(skb);
1329 + skb->tstamp.tv64 = 0;
1333 + * Support routine. Sends outgoing frames to any network
1334 + * taps currently in use.
1337 +static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1339 + struct packet_type *ptype;
1341 + net_timestamp(skb);
1344 + list_for_each_entry_rcu(ptype, &ptype_all, list) {
1345 + /* Never send packets back to the socket
1346 + * they originated from - MvS (miquels@drinkel.ow.org)
1348 + if ((ptype->dev == dev || !ptype->dev) &&
1349 + (ptype->af_packet_priv == NULL ||
1350 + (struct sock *)ptype->af_packet_priv != skb->sk)) {
1351 + struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1355 + /* skb->nh should be correctly
1356 + set by sender, so that the second statement is
1357 + just protection against buggy protocols.
1359 + skb_reset_mac_header(skb2);
1361 + if (skb_network_header(skb2) < skb2->data ||
1362 + skb2->network_header > skb2->tail) {
1363 + if (net_ratelimit())
1364 + printk(KERN_CRIT "protocol %04x is "
1365 + "buggy, dev %s\n",
1366 + skb2->protocol, dev->name);
1367 + skb_reset_network_header(skb2);
1370 + skb2->transport_header = skb2->network_header;
1371 + skb2->pkt_type = PACKET_OUTGOING;
1372 + ptype->func(skb2, skb->dev, ptype, skb->dev);
1375 + rcu_read_unlock();
1379 +static inline void __netif_reschedule(struct Qdisc *q)
1381 + struct softnet_data *sd;
1382 + unsigned long flags;
1384 + local_irq_save(flags);
1385 + sd = &__get_cpu_var(softnet_data);
1386 + q->next_sched = sd->output_queue;
1387 + sd->output_queue = q;
1388 + raise_softirq_irqoff(NET_TX_SOFTIRQ);
1389 + local_irq_restore(flags);
1392 +void __netif_schedule(struct Qdisc *q)
1394 + if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1395 + __netif_reschedule(q);
1397 +EXPORT_SYMBOL(__netif_schedule);
1399 +void dev_kfree_skb_irq(struct sk_buff *skb)
1401 + if (atomic_dec_and_test(&skb->users)) {
1402 + struct softnet_data *sd;
1403 + unsigned long flags;
1405 + local_irq_save(flags);
1406 + sd = &__get_cpu_var(softnet_data);
1407 + skb->next = sd->completion_queue;
1408 + sd->completion_queue = skb;
1409 + raise_softirq_irqoff(NET_TX_SOFTIRQ);
1410 + local_irq_restore(flags);
1413 +EXPORT_SYMBOL(dev_kfree_skb_irq);
1415 +void dev_kfree_skb_any(struct sk_buff *skb)
1417 + if (in_irq() || irqs_disabled())
1418 + dev_kfree_skb_irq(skb);
1420 + dev_kfree_skb(skb);
1422 +EXPORT_SYMBOL(dev_kfree_skb_any);
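/*
 * Annotation (not part of the patch): dev_kfree_skb_any() picks the safe
 * variant at run time. dev_kfree_skb() may only be called with IRQs
 * enabled outside hard-IRQ context, while dev_kfree_skb_irq() defers the
 * actual free to the NET_TX_SOFTIRQ completion queue drained in
 * net_tx_action().
 */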
1426 + * netif_device_detach - mark device as removed
1427 + * @dev: network device
1429 + * Mark device as removed from system and therefore no longer available.
1431 +void netif_device_detach(struct net_device *dev)
1433 + if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1434 + netif_running(dev)) {
1435 + netif_stop_queue(dev);
1438 +EXPORT_SYMBOL(netif_device_detach);
1441 + * netif_device_attach - mark device as attached
1442 + * @dev: network device
1444 + * Mark device as attached from system and restart if needed.
1446 +void netif_device_attach(struct net_device *dev)
1448 + if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1449 + netif_running(dev)) {
1450 + netif_wake_queue(dev);
1451 + __netdev_watchdog_up(dev);
1454 +EXPORT_SYMBOL(netif_device_attach);
1456 +static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1458 + return ((features & NETIF_F_GEN_CSUM) ||
1459 + ((features & NETIF_F_IP_CSUM) &&
1460 + protocol == htons(ETH_P_IP)) ||
1461 + ((features & NETIF_F_IPV6_CSUM) &&
1462 + protocol == htons(ETH_P_IPV6)));
1465 +static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1467 + if (can_checksum_protocol(dev->features, skb->protocol))
1470 + if (skb->protocol == htons(ETH_P_8021Q)) {
1471 + struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1472 + if (can_checksum_protocol(dev->features & dev->vlan_features,
1473 + veh->h_vlan_encapsulated_proto))
1481 + * Invalidate hardware checksum when packet is to be mangled, and
1482 + * complete checksum manually on outgoing path.
1484 +int skb_checksum_help(struct sk_buff *skb)
1487 + int ret = 0, offset;
1489 + if (skb->ip_summed == CHECKSUM_COMPLETE)
1490 + goto out_set_summed;
1492 + if (unlikely(skb_shinfo(skb)->gso_size)) {
1493 + /* Let GSO fix up the checksum. */
1494 + goto out_set_summed;
1497 + offset = skb->csum_start - skb_headroom(skb);
1498 + BUG_ON(offset >= skb_headlen(skb));
1499 + csum = skb_checksum(skb, offset, skb->len - offset, 0);
1501 + offset += skb->csum_offset;
1502 + BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1504 + if (skb_cloned(skb) &&
1505 + !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1506 + ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1511 + *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1513 + skb->ip_summed = CHECKSUM_NONE;
1519 + * skb_gso_segment - Perform segmentation on skb.
1520 + * @skb: buffer to segment
1521 + * @features: features for the output path (see dev->features)
1523 + * This function segments the given skb and returns a list of segments.
1525 + * It may return NULL if the skb requires no segmentation. This is
1526 + * only possible when GSO is used for verifying header integrity.
1528 +struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1530 + struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1531 + struct packet_type *ptype;
1532 + __be16 type = skb->protocol;
1535 + BUG_ON(skb_shinfo(skb)->frag_list);
1537 + skb_reset_mac_header(skb);
1538 + skb->mac_len = skb->network_header - skb->mac_header;
1539 + __skb_pull(skb, skb->mac_len);
1541 + if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
1542 + if (skb_header_cloned(skb) &&
1543 + (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1544 + return ERR_PTR(err);
1548 + list_for_each_entry_rcu(ptype,
1549 + &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1550 + if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1551 + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1552 + err = ptype->gso_send_check(skb);
1553 + segs = ERR_PTR(err);
1554 + if (err || skb_gso_ok(skb, features))
1556 + __skb_push(skb, (skb->data -
1557 + skb_network_header(skb)));
1559 + segs = ptype->gso_segment(skb, features);
1563 + rcu_read_unlock();
1565 + __skb_push(skb, skb->data - skb_mac_header(skb));
1570 +EXPORT_SYMBOL(skb_gso_segment);
1572 +/* Take action when hardware reception checksum errors are detected. */
1574 +void netdev_rx_csum_fault(struct net_device *dev)
1576 + if (net_ratelimit()) {
1577 + printk(KERN_ERR "%s: hw csum failure.\n",
1578 + dev ? dev->name : "<unknown>");
1582 +EXPORT_SYMBOL(netdev_rx_csum_fault);
1585 +/* Actually, we should eliminate this check as soon as we know, that:
1586 + * 1. IOMMU is present and allows mapping of all the memory.
1587 + * 2. No high memory really exists on this machine.
1590 +static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1592 +#ifdef CONFIG_HIGHMEM
1595 + if (dev->features & NETIF_F_HIGHDMA)
1598 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1599 + if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1606 +struct dev_gso_cb {
1607 + void (*destructor)(struct sk_buff *skb);
1610 +#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1612 +static void dev_gso_skb_destructor(struct sk_buff *skb)
1614 + struct dev_gso_cb *cb;
1617 + struct sk_buff *nskb = skb->next;
1619 + skb->next = nskb->next;
1620 + nskb->next = NULL;
1622 + } while (skb->next);
1624 + cb = DEV_GSO_CB(skb);
1625 + if (cb->destructor)
1626 + cb->destructor(skb);
1630 + * dev_gso_segment - Perform emulated hardware segmentation on skb.
1631 + * @skb: buffer to segment
1633 + * This function segments the given skb and stores the list of segments
1636 +static int dev_gso_segment(struct sk_buff *skb)
1638 + struct net_device *dev = skb->dev;
1639 + struct sk_buff *segs;
1640 + int features = dev->features & ~(illegal_highdma(dev, skb) ?
1643 + segs = skb_gso_segment(skb, features);
1645 + /* Verifying header integrity only. */
1650 + return PTR_ERR(segs);
1653 + DEV_GSO_CB(skb)->destructor = skb->destructor;
1654 + skb->destructor = dev_gso_skb_destructor;
1659 +int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1660 + struct netdev_queue *txq)
1662 + if (likely(!skb->next)) {
1663 + if (!list_empty(&ptype_all))
1664 + dev_queue_xmit_nit(skb, dev);
1666 + if (netif_needs_gso(dev, skb)) {
1667 + if (unlikely(dev_gso_segment(skb)))
1668 + goto out_kfree_skb;
1673 + return dev->hard_start_xmit(skb, dev);
1678 + struct sk_buff *nskb = skb->next;
1681 + skb->next = nskb->next;
1682 + nskb->next = NULL;
1683 + rc = dev->hard_start_xmit(nskb, dev);
1684 + if (unlikely(rc)) {
1685 + nskb->next = skb->next;
1689 + if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
1690 + return NETDEV_TX_BUSY;
1691 + } while (skb->next);
1693 + skb->destructor = DEV_GSO_CB(skb)->destructor;
1700 +static u32 simple_tx_hashrnd;
1701 +static int simple_tx_hashrnd_initialized = 0;
1703 +static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
1705 + u32 addr1, addr2, ports;
1709 + if (unlikely(!simple_tx_hashrnd_initialized)) {
1710 + get_random_bytes(&simple_tx_hashrnd, 4);
1711 + simple_tx_hashrnd_initialized = 1;
1714 + switch (skb->protocol) {
1715 + case __constant_htons(ETH_P_IP):
1716 + if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
1717 + ip_proto = ip_hdr(skb)->protocol;
1718 + addr1 = ip_hdr(skb)->saddr;
1719 + addr2 = ip_hdr(skb)->daddr;
1720 + ihl = ip_hdr(skb)->ihl;
1722 + case __constant_htons(ETH_P_IPV6):
1723 + ip_proto = ipv6_hdr(skb)->nexthdr;
1724 + addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
1725 + addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
1733 + switch (ip_proto) {
1736 + case IPPROTO_DCCP:
1739 + case IPPROTO_SCTP:
1740 + case IPPROTO_UDPLITE:
1741 + ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
1749 + hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
1751 + return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
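/*
 * Annotation (not part of the patch): the return statement above maps the
 * 32-bit jhash result h onto [0, n) without a modulo. Since
 * (u64)h * n < n * 2^32, the shift ((u64)h * n) >> 32 yields a queue
 * index in [0, n), uniformly to within rounding.
 */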
1754 +static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1755 + struct sk_buff *skb)
1757 + u16 queue_index = 0;
1759 + if (dev->select_queue)
1760 + queue_index = dev->select_queue(dev, skb);
1761 + else if (dev->real_num_tx_queues > 1)
1762 + queue_index = simple_tx_hash(dev, skb);
1764 + skb_set_queue_mapping(skb, queue_index);
1765 + return netdev_get_tx_queue(dev, queue_index);
1769 + * dev_queue_xmit - transmit a buffer
1770 + * @skb: buffer to transmit
1772 + * Queue a buffer for transmission to a network device. The caller must
1773 + * have set the device and priority and built the buffer before calling
1774 + * this function. The function can be called from an interrupt.
1776 + * A negative errno code is returned on a failure. A success does not
1777 + * guarantee the frame will be transmitted as it may be dropped due
1778 + * to congestion or traffic shaping.
1780 + * -----------------------------------------------------------------------------------
1781 + * I notice this method can also return errors from the queue disciplines,
1782 + * including NET_XMIT_DROP, which is a positive value. So, errors can also
1785 + * Regardless of the return value, the skb is consumed, so it is currently
1786 + * difficult to retry a send to this method. (You can bump the ref count
1787 + * before sending to hold a reference for retry if you are careful.)
1789 + * When calling this method, interrupts MUST be enabled. This is because
1790 + * the BH enable code must have IRQs enabled so that it will not deadlock.
1793 +int dev_queue_xmit(struct sk_buff *skb)
1795 + struct net_device *dev = skb->dev;
1796 + struct netdev_queue *txq;
1800 + /* GSO will handle the following emulations directly. */
1801 + if (netif_needs_gso(dev, skb))
1804 + if (skb_shinfo(skb)->frag_list &&
1805 + !(dev->features & NETIF_F_FRAGLIST) &&
1806 + __skb_linearize(skb))
1807 + goto out_kfree_skb;
1809 + /* Fragmented skb is linearized if device does not support SG,
1810 + * or if at least one of fragments is in highmem and device
1811 + * does not support DMA from it.
1813 + if (skb_shinfo(skb)->nr_frags &&
1814 + (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
1815 + __skb_linearize(skb))
1816 + goto out_kfree_skb;
1818 + /* If packet is not checksummed and device does not support
1819 + * checksumming for this protocol, complete checksumming here.
1821 + if (skb->ip_summed == CHECKSUM_PARTIAL) {
1822 + skb_set_transport_header(skb, skb->csum_start -
1823 + skb_headroom(skb));
1824 + if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1825 + goto out_kfree_skb;
1829 + /* Disable soft irqs for various locks below. Also
1830 + * stops preemption for RCU.
1832 + rcu_read_lock_bh();
1834 + txq = dev_pick_tx(dev, skb);
1835 + q = rcu_dereference(txq->qdisc);
1837 +#ifdef CONFIG_NET_CLS_ACT
1838 + skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1841 + spinlock_t *root_lock = qdisc_lock(q);
1843 + spin_lock(root_lock);
1845 + if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1847 + rc = NET_XMIT_DROP;
1849 + rc = qdisc_enqueue_root(skb, q);
1852 + spin_unlock(root_lock);
1857 + /* The device has no queue. Common case for software devices:
1858 + loopback, all the sorts of tunnels...
1860 + Really, it is unlikely that netif_tx_lock protection is necessary
1861 + here. (f.e. loopback and IP tunnels are clean ignoring statistics
1863 + However, it is possible that they rely on protection
1866 + Check this and shoot the lock. It is not prone to deadlocks.
1867 + Either shoot the noqueue qdisc; it is even simpler 8)
1869 + if (dev->flags & IFF_UP) {
1870 + int cpu = smp_processor_id(); /* ok because BHs are off */
1872 + if (txq->xmit_lock_owner != cpu) {
1874 + HARD_TX_LOCK(dev, txq, cpu);
1876 + if (!netif_tx_queue_stopped(txq)) {
1878 + if (!dev_hard_start_xmit(skb, dev, txq)) {
1879 + HARD_TX_UNLOCK(dev, txq);
1883 + HARD_TX_UNLOCK(dev, txq);
1884 + if (net_ratelimit())
1885 + printk(KERN_CRIT "Virtual device %s asks to "
1886 + "queue packet!\n", dev->name);
1888 + /* Recursion is detected! It is possible,
1889 + * unfortunately */
1890 + if (net_ratelimit())
1891 + printk(KERN_CRIT "Dead loop on virtual device "
1892 + "%s, fix it urgently!\n", dev->name);
1897 + rcu_read_unlock_bh();
1903 + rcu_read_unlock_bh();
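/*
 * Illustrative transmit (not part of the patch); the wrapper name is an
 * assumption. Per the comment above, the skb is consumed whatever the
 * return value, so a caller cannot retry without holding an extra
 * reference.
 */
static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);        /* caller sets protocol */
        return dev_queue_xmit(skb);             /* may return NET_XMIT_DROP */
}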
1908 +/*=======================================================================
1910 + =======================================================================*/
1912 +int netdev_max_backlog __read_mostly = 1000;
1913 +int netdev_budget __read_mostly = 300;
1914 +int weight_p __read_mostly = 64; /* old backlog weight */
1916 +DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1920 + * netif_rx - post buffer to the network code
1921 + * @skb: buffer to post
1923 + * This function receives a packet from a device driver and queues it for
1924 + * the upper (protocol) levels to process. It always succeeds. The buffer
1925 + * may be dropped during processing for congestion control or by the
1926 + * protocol layers.
1929 + * NET_RX_SUCCESS (no congestion)
1930 + * NET_RX_DROP (packet was dropped)
1934 +int netif_rx(struct sk_buff *skb)
1936 + struct softnet_data *queue;
1937 + unsigned long flags;
1939 + /* if netpoll wants it, pretend we never saw it */
1940 + if (netpoll_rx(skb))
1941 + return NET_RX_DROP;
1943 + if (!skb->tstamp.tv64)
1944 + net_timestamp(skb);
1947 + * The code is rearranged so that the path is shortest when the
1948 + * CPU is congested but still operating.
1950 + local_irq_save(flags);
1951 + queue = &__get_cpu_var(softnet_data);
1953 + __get_cpu_var(netdev_rx_stat).total++;
1954 + if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1955 + if (queue->input_pkt_queue.qlen) {
1957 + __skb_queue_tail(&queue->input_pkt_queue, skb);
1958 + local_irq_restore(flags);
1959 + return NET_RX_SUCCESS;
1962 + napi_schedule(&queue->backlog);
1966 + __get_cpu_var(netdev_rx_stat).dropped++;
1967 + local_irq_restore(flags);
1970 + return NET_RX_DROP;
1973 +int netif_rx_ni(struct sk_buff *skb)
1977 + preempt_disable();
1978 + err = netif_rx(skb);
1979 + if (local_softirq_pending())
1986 +EXPORT_SYMBOL(netif_rx_ni);
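/*
 * Illustrative driver receive path (not part of the patch), feeding
 * netif_rx() from an interrupt handler; names and sizes are assumptions.
 */
static void example_driver_rx(struct net_device *dev, void *data, int len)
{
        struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);

        if (!skb)
                return;                         /* drop on allocation failure */
        skb_reserve(skb, NET_IP_ALIGN);         /* align the IP header */
        memcpy(skb_put(skb, len), data, len);
        skb->protocol = eth_type_trans(skb, dev);
        netif_rx(skb);
}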
1988 +static void net_tx_action(struct softirq_action *h)
1990 + struct softnet_data *sd = &__get_cpu_var(softnet_data);
1992 + if (sd->completion_queue) {
1993 + struct sk_buff *clist;
1995 + local_irq_disable();
1996 + clist = sd->completion_queue;
1997 + sd->completion_queue = NULL;
1998 + local_irq_enable();
2001 + struct sk_buff *skb = clist;
2002 + clist = clist->next;
2004 + WARN_ON(atomic_read(&skb->users));
2009 + if (sd->output_queue) {
2010 + struct Qdisc *head;
2012 + local_irq_disable();
2013 + head = sd->output_queue;
2014 + sd->output_queue = NULL;
2015 + local_irq_enable();
2018 + struct Qdisc *q = head;
2019 + spinlock_t *root_lock;
2021 + head = head->next_sched;
2023 + root_lock = qdisc_lock(q);
2024 + if (spin_trylock(root_lock)) {
2025 + smp_mb__before_clear_bit();
2026 + clear_bit(__QDISC_STATE_SCHED,
2029 + spin_unlock(root_lock);
2031 + if (!test_bit(__QDISC_STATE_DEACTIVATED,
2033 + __netif_reschedule(q);
2035 + smp_mb__before_clear_bit();
2036 + clear_bit(__QDISC_STATE_SCHED,
2044 +static inline int deliver_skb(struct sk_buff *skb,
2045 + struct packet_type *pt_prev,
2046 + struct net_device *orig_dev)
2048 + atomic_inc(&skb->users);
2049 + return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2054 + * Click: Allow Click to ask to intercept input packets.
2057 +register_net_in(struct notifier_block *nb)
2061 + err = raw_notifier_chain_register(&net_in_chain, nb);
2067 +unregister_net_in(struct notifier_block *nb)
2071 + err = raw_notifier_chain_unregister(&net_in_chain, nb);
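/*
 * Illustrative Click-side hook (not part of the patch); the names are
 * assumptions. A notifier registered on net_in_chain can steal a packet
 * from __netif_receive_skb() below by returning a value with
 * NOTIFY_STOP_MASK set.
 */
static int example_click_in(struct notifier_block *nb,
                            unsigned long val, void *v)
{
        struct sk_buff *skb = v;

        /* ... hand skb to the Click router, which now owns it ... */
        return NOTIFY_STOP;     /* NOTIFY_STOP_MASK set: packet stolen */
}

static struct notifier_block example_click_nb = {
        .notifier_call = example_click_in,
};

/* register_net_in(&example_click_nb); */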
2077 +#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
2078 +/* These hooks defined here for ATM */
2080 +struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
2081 + unsigned char *addr);
2082 +void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
2085 + * If bridge module is loaded call bridging hook.
2086 + * returns NULL if packet was consumed.
2088 +struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2089 + struct sk_buff *skb) __read_mostly;
2090 +static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2091 + struct packet_type **pt_prev, int *ret,
2092 + struct net_device *orig_dev)
2094 + struct net_bridge_port *port;
2096 + if (skb->pkt_type == PACKET_LOOPBACK ||
2097 + (port = rcu_dereference(skb->dev->br_port)) == NULL)
2101 + *ret = deliver_skb(skb, *pt_prev, orig_dev);
2105 + return br_handle_frame_hook(port, skb);
2108 +#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
2111 +#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2112 +struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2113 +EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2115 +static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2116 + struct packet_type **pt_prev,
2118 + struct net_device *orig_dev)
2120 + if (skb->dev->macvlan_port == NULL)
2124 + *ret = deliver_skb(skb, *pt_prev, orig_dev);
2127 + return macvlan_handle_frame_hook(skb);
2130 +#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2133 +#ifdef CONFIG_NET_CLS_ACT
2134 +/* TODO: Maybe we should just force sch_ingress to be compiled in
2135 + * when CONFIG_NET_CLS_ACT is? Otherwise we pay some useless instructions,
2136 + * a compare and 2 stores extra, right now if we don't have it on
2137 + * but have CONFIG_NET_CLS_ACT.
2138 + * NOTE: This doesn't stop any functionality; if you don't have
2139 + * the ingress scheduler, you just can't add policies on ingress.
2142 +static int ing_filter(struct sk_buff *skb)
2144 + struct net_device *dev = skb->dev;
2145 + u32 ttl = G_TC_RTTL(skb->tc_verd);
2146 + struct netdev_queue *rxq;
2147 + int result = TC_ACT_OK;
2150 + if (MAX_RED_LOOP < ttl++) {
2151 + printk(KERN_WARNING
2152 + "Redir loop detected Dropping packet (%d->%d)\n",
2153 + skb->iif, dev->ifindex);
2154 + return TC_ACT_SHOT;
2157 + skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2158 + skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2160 + rxq = &dev->rx_queue;
2163 + if (q != &noop_qdisc) {
2164 + spin_lock(qdisc_lock(q));
2165 + if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2166 + result = qdisc_enqueue_root(skb, q);
2167 + spin_unlock(qdisc_lock(q));
2173 +static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2174 + struct packet_type **pt_prev,
2175 + int *ret, struct net_device *orig_dev)
2177 + if (skb->dev->rx_queue.qdisc == &noop_qdisc)
2181 + *ret = deliver_skb(skb, *pt_prev, orig_dev);
2184 + /* Huh? Why does turning on AF_PACKET affect this? */
2185 + skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2188 + switch (ing_filter(skb)) {
2190 + case TC_ACT_STOLEN:
2202 + * netif_nit_deliver - deliver received packets to network taps
2205 + * This function is used to deliver incoming packets to network
2206 + * taps. It should be used when the normal netif_receive_skb path
2207 + * is bypassed, for example because of VLAN acceleration.
2209 +void netif_nit_deliver(struct sk_buff *skb)
2211 + struct packet_type *ptype;
2213 + if (list_empty(&ptype_all))
2216 + skb_reset_network_header(skb);
2217 + skb_reset_transport_header(skb);
2218 + skb->mac_len = skb->network_header - skb->mac_header;
2221 + list_for_each_entry_rcu(ptype, &ptype_all, list) {
2222 + if (!ptype->dev || ptype->dev == skb->dev)
2223 + deliver_skb(skb, ptype, skb->dev);
2225 + rcu_read_unlock();
2229 + * netif_receive_skb - process receive buffer from network
2230 + * @skb: buffer to process
2232 + * netif_receive_skb() is the main receive data processing function.
2233 + * It always succeeds. The buffer may be dropped during processing
2234 + * for congestion control or by the protocol layers.
2236 + * This function may only be called from softirq context and interrupts
2237 + * should be enabled.
2239 + * Return values (usually ignored):
2240 + * NET_RX_SUCCESS: no congestion
2241 + * NET_RX_DROP: packet was dropped
2243 +//int netif_receive_skb(struct sk_buff *skb)
2244 +int __netif_receive_skb(struct sk_buff *skb, unsigned short type, int notifier_data)
2246 + struct packet_type *ptype, *pt_prev;
2247 + struct net_device *orig_dev;
2248 + struct net_device *null_or_orig;
2249 + int ret = NET_RX_DROP;
2252 + /* if we've gotten here through NAPI, check netpoll */
2253 + if (netpoll_receive_skb(skb))
2254 + return NET_RX_DROP;
2256 + if (!skb->tstamp.tv64)
2257 + net_timestamp(skb);
2260 + skb->iif = skb->dev->ifindex;
2262 + null_or_orig = NULL;
2263 + orig_dev = skb->dev;
2264 + if (orig_dev->master) {
2265 + if (skb_bond_should_drop(skb))
2266 + null_or_orig = orig_dev; /* deliver only exact match */
2268 + skb->dev = orig_dev->master;
2271 + __get_cpu_var(netdev_rx_stat).total++;
2273 + skb_reset_network_header(skb);
2274 + skb_reset_transport_header(skb);
2275 + skb->mac_len = skb->network_header - skb->mac_header;
2277 + /* Click: may want to steal the packet */
2278 + if (notifier_data >= 0
2279 + && raw_notifier_call_chain(&net_in_chain,
2281 + skb) & NOTIFY_STOP_MASK) {
2289 + /* Don't receive packets in an exiting network namespace */
2290 + if (!net_alive(dev_net(skb->dev)))
2293 +#ifdef CONFIG_NET_CLS_ACT
2294 + if (skb->tc_verd & TC_NCLS) {
2295 + skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2300 + list_for_each_entry_rcu(ptype, &ptype_all, list) {
2301 + if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2302 + ptype->dev == orig_dev) {
2304 + ret = deliver_skb(skb, pt_prev, orig_dev);
2309 +#ifdef CONFIG_NET_CLS_ACT
2310 + skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2316 + skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2319 + skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2323 + type = skb->protocol;
2324 + list_for_each_entry_rcu(ptype,
2325 + &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2326 + if (ptype->type == type &&
2327 + (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2328 + ptype->dev == orig_dev)) {
2330 + ret = deliver_skb(skb, pt_prev, orig_dev);
2336 + ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2339 + /* Jamal, now you will not be able to escape explaining
2340 + * to me how you were going to use this. :-)
2342 + ret = NET_RX_DROP;
2346 + rcu_read_unlock();
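
For orientation, a minimal sketch of the usual caller (not from this patch): a NAPI poll routine feeding frames in from softirq context. In the stock tree the driver-facing entry is netif_receive_skb(); this patch reroutes it through __netif_receive_skb() so the Click net_in_chain notifier can steal packets first. All mynic_* names are hypothetical.

	struct mynic_priv {
		struct napi_struct napi;
		struct net_device *dev;
	};

	static int mynic_poll(struct napi_struct *napi, int budget)
	{
		struct mynic_priv *priv = container_of(napi, struct mynic_priv, napi);
		struct sk_buff *skb;
		int work = 0;

		/* mynic_next_rx_frame() is a hypothetical RX-ring accessor */
		while (work < budget && (skb = mynic_next_rx_frame(priv)) != NULL) {
			skb->protocol = eth_type_trans(skb, priv->dev);
			netif_receive_skb(skb);   /* softirq context, IRQs enabled */
			work++;
		}
		if (work < budget)
			netif_rx_complete(priv->dev, napi);  /* done; re-enable RX IRQs */
		return work;
	}
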
2350 +/* Network device is going away, flush any packets still pending */
2351 +static void flush_backlog(void *arg)
2353 + struct net_device *dev = arg;
2354 + struct softnet_data *queue = &__get_cpu_var(softnet_data);
2355 + struct sk_buff *skb, *tmp;
2357 + skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2358 + if (skb->dev == dev) {
2359 + __skb_unlink(skb, &queue->input_pkt_queue);
2364 +static int process_backlog(struct napi_struct *napi, int quota)
2367 + struct softnet_data *queue = &__get_cpu_var(softnet_data);
2368 + unsigned long start_time = jiffies;
2370 + napi->weight = weight_p;
2372 + struct sk_buff *skb;
2374 + local_irq_disable();
2375 + skb = __skb_dequeue(&queue->input_pkt_queue);
2377 + __napi_complete(napi);
2378 + local_irq_enable();
2381 + local_irq_enable();
2383 + __netif_receive_skb(skb, skb->protocol, skb_queue_len(&queue->input_pkt_queue));
2384 +//XXX netif_receive_skb(skb);
2385 + } while (++work < quota && jiffies == start_time);
2391 + * __napi_schedule - schedule for receive
2392 + * @n: entry to schedule
2394 + * The entry's receive function will be scheduled to run
2396 +void __napi_schedule(struct napi_struct *n)
2398 + unsigned long flags;
2400 + local_irq_save(flags);
2401 + list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2402 + __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2403 + local_irq_restore(flags);
2405 +EXPORT_SYMBOL(__napi_schedule);
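
The canonical interrupt-side pairing, sketched (mynic_* names hypothetical, struct as in the poll sketch above): mask the device's RX interrupt, then hand the NAPI instance to this CPU's poll list.

	static irqreturn_t mynic_interrupt(int irq, void *dev_id)
	{
		struct mynic_priv *priv = dev_id;

		if (likely(napi_schedule_prep(&priv->napi))) {
			mynic_disable_rx_irq(priv);   /* hypothetical register write */
			__napi_schedule(&priv->napi); /* queue on this CPU's poll_list */
		}
		return IRQ_HANDLED;
	}
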
2408 +static void net_rx_action(struct softirq_action *h)
2410 + struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2411 + unsigned long start_time = jiffies;
2412 + int budget = netdev_budget;
2415 + local_irq_disable();
2417 + while (!list_empty(list)) {
2418 + struct napi_struct *n;
2421 + /* If the softirq window is exhausted then punt.
2423 + * Note that this is a slight policy change from the
2424 + * previous NAPI code, which would allow up to 2
2425 + * jiffies to pass before breaking out. The test
2426 + * used to be "jiffies - start_time > 1".
2428 + if (unlikely(budget <= 0 || jiffies != start_time))
2429 + goto softnet_break;
2431 + local_irq_enable();
2433 + /* Even though interrupts have been re-enabled, this
2434 + * access is safe because interrupts can only add new
2435 + * entries to the tail of this list, and only ->poll()
2436 + * calls can remove this head entry from the list.
2438 + n = list_entry(list->next, struct napi_struct, poll_list);
2440 + have = netpoll_poll_lock(n);
2442 + weight = n->weight;
2444 + /* This NAPI_STATE_SCHED test is for avoiding a race
2445 + * with netpoll's poll_napi(). Only the entity which
2446 + * obtains the lock and sees NAPI_STATE_SCHED set will
2447 + * actually make the ->poll() call. Therefore we avoid
2448 + * accidentally calling ->poll() when NAPI is not scheduled.
2451 + if (test_bit(NAPI_STATE_SCHED, &n->state))
2452 + work = n->poll(n, weight);
2454 + WARN_ON_ONCE(work > weight);
2458 + local_irq_disable();
2460 + /* Drivers must not modify the NAPI state if they
2461 + * consume the entire weight. In such cases this code
2462 + * still "owns" the NAPI instance and therefore can
2463 + * move the instance around on the list at-will.
2465 + if (unlikely(work == weight)) {
2466 + if (unlikely(napi_disable_pending(n)))
2467 + __napi_complete(n);
2469 + list_move_tail(&n->poll_list, list);
2472 + netpoll_poll_unlock(have);
2475 + local_irq_enable();
2477 +#ifdef CONFIG_NET_DMA
2479 + * There may not be any more sk_buffs coming right now, so push
2480 + * any pending DMA copies to hardware
2482 + if (!cpus_empty(net_dma.channel_mask)) {
2484 + for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
2485 + struct dma_chan *chan = net_dma.channels[chan_idx];
2487 + dma_async_memcpy_issue_pending(chan);
2495 + __get_cpu_var(netdev_rx_stat).time_squeeze++;
2496 + __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2500 +static gifconf_func_t * gifconf_list [NPROTO];
2503 + * register_gifconf - register a SIOCGIF handler
2504 + * @family: Address family
2505 + * @gifconf: Function handler
2507 + * Register protocol dependent address dumping routines. The handler
2508 + * that is passed must not be freed or reused until it has been replaced
2509 + * by another handler.
2511 +int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2513 + if (family >= NPROTO)
2515 + gifconf_list[family] = gifconf;
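
This mirrors how IPv4 hooks SIOCGIFCONF (net/ipv4/devinet.c registers inet_gifconf at init time); a hedged sketch, with the family and handler purely illustrative:

	/* Per gifconf_func_t: write one struct ifreq per address into buf and
	 * return the bytes consumed, or the space needed when buf is NULL. */
	static int myproto_gifconf(struct net_device *dev, char __user *buf, int len)
	{
		return 0;   /* no addresses in this sketch */
	}

	static int __init myproto_init(void)
	{
		/* PF_INET here would replace inet_gifconf; family illustrative */
		return register_gifconf(PF_INET, myproto_gifconf);
	}
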
2521 + * Map an interface index to its name (SIOCGIFNAME)
2525 + * We need this ioctl for efficient implementation of the
2526 + * if_indextoname() function required by the IPv6 API. Without
2527 + * it, we would have to search all the interfaces to find a
2528 + * match. --pb
2531 +static int dev_ifname(struct net *net, struct ifreq __user *arg)
2533 + struct net_device *dev;
2537 + * Fetch the caller's info block.
2540 + if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2543 + read_lock(&dev_base_lock);
2544 + dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2546 + read_unlock(&dev_base_lock);
2550 + strcpy(ifr.ifr_name, dev->name);
2551 + read_unlock(&dev_base_lock);
2553 + if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
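
From the other side of the boundary, a standalone userspace sketch of the call this services, essentially what if_indextoname() issues under the hood:

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <unistd.h>

	int main(void)
	{
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		ifr.ifr_ifindex = 1;                    /* typically "lo" */
		if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)  /* lands in dev_ifname() */
			printf("ifindex 1 is %s\n", ifr.ifr_name);
		close(fd);
		return 0;
	}
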
2559 + * Perform a SIOCGIFCONF call. This structure will change
2560 + * size eventually, and there is nothing I can do about it.
2561 + * Thus we will need a 'compatibility mode'.
2564 +static int dev_ifconf(struct net *net, char __user *arg)
2566 + struct ifconf ifc;
2567 + struct net_device *dev;
2574 + * Fetch the caller's info block.
2577 + if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2580 + pos = ifc.ifc_buf;
2581 + len = ifc.ifc_len;
2584 + * Loop over the interfaces, and write an info block for each.
2588 + for_each_netdev(net, dev) {
2589 + if (!nx_dev_visible(current->nx_info, dev))
2591 + for (i = 0; i < NPROTO; i++) {
2592 + if (gifconf_list[i]) {
2595 + done = gifconf_list[i](dev, NULL, 0);
2597 + done = gifconf_list[i](dev, pos + total,
2607 + * All done. Write the updated control block back to the caller.
2609 + ifc.ifc_len = total;
2612 + * Both BSD and Solaris return 0 here, so we do too.
2614 + return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2617 +#ifdef CONFIG_PROC_FS
2619 + * This is invoked by the /proc filesystem handler to display a device
2622 +void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2623 + __acquires(dev_base_lock)
2625 + struct net *net = seq_file_net(seq);
2627 + struct net_device *dev;
2629 + read_lock(&dev_base_lock);
2631 + return SEQ_START_TOKEN;
2634 + for_each_netdev(net, dev)
2635 + if (off++ == *pos)
2641 +void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2643 + struct net *net = seq_file_net(seq);
2645 + return v == SEQ_START_TOKEN ?
2646 + first_net_device(net) : next_net_device((struct net_device *)v);
2649 +void dev_seq_stop(struct seq_file *seq, void *v)
2650 + __releases(dev_base_lock)
2652 + read_unlock(&dev_base_lock);
2655 +static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2657 + struct net_device_stats *stats = dev->get_stats(dev);
2659 + if (!nx_dev_visible(current->nx_info, dev))
2662 + seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2663 + "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2664 + dev->name, stats->rx_bytes, stats->rx_packets,
2666 + stats->rx_dropped + stats->rx_missed_errors,
2667 + stats->rx_fifo_errors,
2668 + stats->rx_length_errors + stats->rx_over_errors +
2669 + stats->rx_crc_errors + stats->rx_frame_errors,
2670 + stats->rx_compressed, stats->multicast,
2671 + stats->tx_bytes, stats->tx_packets,
2672 + stats->tx_errors, stats->tx_dropped,
2673 + stats->tx_fifo_errors, stats->collisions,
2674 + stats->tx_carrier_errors +
2675 + stats->tx_aborted_errors +
2676 + stats->tx_window_errors +
2677 + stats->tx_heartbeat_errors,
2678 + stats->tx_compressed);
2682 + * Called from the PROCfs module. This now uses the new arbitrary sized
2683 + * /proc/net interface to create /proc/net/dev
2685 +static int dev_seq_show(struct seq_file *seq, void *v)
2687 + if (v == SEQ_START_TOKEN)
2688 + seq_puts(seq, "Inter-| Receive "
2690 + " face |bytes packets errs drop fifo frame "
2691 + "compressed multicast|bytes packets errs "
2692 + "drop fifo colls carrier compressed\n");
2694 + dev_seq_printf_stats(seq, v);
2698 +static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2700 + struct netif_rx_stats *rc = NULL;
2702 + while (*pos < nr_cpu_ids)
2703 + if (cpu_online(*pos)) {
2704 + rc = &per_cpu(netdev_rx_stat, *pos);
2711 +static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2713 + return softnet_get_online(pos);
2716 +static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2719 + return softnet_get_online(pos);
2722 +static void softnet_seq_stop(struct seq_file *seq, void *v)
2726 +static int softnet_seq_show(struct seq_file *seq, void *v)
2728 + struct netif_rx_stats *s = v;
2730 + seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2731 + s->total, s->dropped, s->time_squeeze, 0,
2732 + 0, 0, 0, 0, /* was fastroute */
2733 + s->cpu_collision );
2737 +static const struct seq_operations dev_seq_ops = {
2738 + .start = dev_seq_start,
2739 + .next = dev_seq_next,
2740 + .stop = dev_seq_stop,
2741 + .show = dev_seq_show,
2744 +static int dev_seq_open(struct inode *inode, struct file *file)
2746 + return seq_open_net(inode, file, &dev_seq_ops,
2747 + sizeof(struct seq_net_private));
2750 +static const struct file_operations dev_seq_fops = {
2751 + .owner = THIS_MODULE,
2752 + .open = dev_seq_open,
2754 + .llseek = seq_lseek,
2755 + .release = seq_release_net,
2758 +static const struct seq_operations softnet_seq_ops = {
2759 + .start = softnet_seq_start,
2760 + .next = softnet_seq_next,
2761 + .stop = softnet_seq_stop,
2762 + .show = softnet_seq_show,
2765 +static int softnet_seq_open(struct inode *inode, struct file *file)
2767 + return seq_open(file, &softnet_seq_ops);
2770 +static const struct file_operations softnet_seq_fops = {
2771 + .owner = THIS_MODULE,
2772 + .open = softnet_seq_open,
2774 + .llseek = seq_lseek,
2775 + .release = seq_release,
2778 +static void *ptype_get_idx(loff_t pos)
2780 + struct packet_type *pt = NULL;
2784 + list_for_each_entry_rcu(pt, &ptype_all, list) {
2790 + for (t = 0; t < PTYPE_HASH_SIZE; t++) {
2791 + list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2800 +static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
2804 + return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2807 +static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2809 + struct packet_type *pt;
2810 + struct list_head *nxt;
2814 + if (v == SEQ_START_TOKEN)
2815 + return ptype_get_idx(0);
2818 + nxt = pt->list.next;
2819 + if (pt->type == htons(ETH_P_ALL)) {
2820 + if (nxt != &ptype_all)
2823 + nxt = ptype_base[0].next;
2825 + hash = ntohs(pt->type) & PTYPE_HASH_MASK;
2827 + while (nxt == &ptype_base[hash]) {
2828 + if (++hash >= PTYPE_HASH_SIZE)
2830 + nxt = ptype_base[hash].next;
2833 + return list_entry(nxt, struct packet_type, list);
2836 +static void ptype_seq_stop(struct seq_file *seq, void *v)
2839 + rcu_read_unlock();
2842 +static void ptype_seq_decode(struct seq_file *seq, void *sym)
2844 +#ifdef CONFIG_KALLSYMS
2845 + unsigned long offset = 0, symsize;
2846 + const char *symname;
2848 + char namebuf[128];
2850 + symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2851 + &modname, namebuf);
2854 + char *delim = ":";
2857 + modname = delim = "";
2858 + seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2864 + seq_printf(seq, "[%p]", sym);
2867 +static int ptype_seq_show(struct seq_file *seq, void *v)
2869 + struct packet_type *pt = v;
2871 + if (v == SEQ_START_TOKEN)
2872 + seq_puts(seq, "Type Device Function\n");
2873 + else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
2874 + if (pt->type == htons(ETH_P_ALL))
2875 + seq_puts(seq, "ALL ");
2877 + seq_printf(seq, "%04x", ntohs(pt->type));
2879 + seq_printf(seq, " %-8s ",
2880 + pt->dev ? pt->dev->name : "");
2881 + ptype_seq_decode(seq, pt->func);
2882 + seq_putc(seq, '\n');
2888 +static const struct seq_operations ptype_seq_ops = {
2889 + .start = ptype_seq_start,
2890 + .next = ptype_seq_next,
2891 + .stop = ptype_seq_stop,
2892 + .show = ptype_seq_show,
2895 +static int ptype_seq_open(struct inode *inode, struct file *file)
2897 + return seq_open_net(inode, file, &ptype_seq_ops,
2898 + sizeof(struct seq_net_private));
2901 +static const struct file_operations ptype_seq_fops = {
2902 + .owner = THIS_MODULE,
2903 + .open = ptype_seq_open,
2905 + .llseek = seq_lseek,
2906 + .release = seq_release_net,
2910 +static int __net_init dev_proc_net_init(struct net *net)
2914 + if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
2916 + if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
2918 + if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
2921 + if (wext_proc_init(net))
2927 + proc_net_remove(net, "ptype");
2929 + proc_net_remove(net, "softnet_stat");
2931 + proc_net_remove(net, "dev");
2935 +static void __net_exit dev_proc_net_exit(struct net *net)
2937 + wext_proc_exit(net);
2939 + proc_net_remove(net, "ptype");
2940 + proc_net_remove(net, "softnet_stat");
2941 + proc_net_remove(net, "dev");
2944 +static struct pernet_operations __net_initdata dev_proc_ops = {
2945 + .init = dev_proc_net_init,
2946 + .exit = dev_proc_net_exit,
2949 +static int __init dev_proc_init(void)
2951 + return register_pernet_subsys(&dev_proc_ops);
2954 +#define dev_proc_init() 0
2955 +#endif /* CONFIG_PROC_FS */
2959 + * netdev_set_master - set up master/slave pair
2960 + * @slave: slave device
2961 + * @master: new master device
2963 + * Changes the master device of the slave. Pass %NULL to break the
2964 + * bonding. The caller must hold the RTNL semaphore. On a failure
2965 + * a negative errno code is returned. On success the reference counts
2966 + * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2967 + * function returns zero.
2969 +int netdev_set_master(struct net_device *slave, struct net_device *master)
2971 + struct net_device *old = slave->master;
2981 + slave->master = master;
2983 + synchronize_net();
2989 + slave->flags |= IFF_SLAVE;
2991 + slave->flags &= ~IFF_SLAVE;
2993 + rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
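
A sketch of the bonding-style enslave sequence this helper serves (cf. drivers/net/bonding); error paths trimmed, names hypothetical:

	static int my_enslave(struct net_device *bond_dev,
			      struct net_device *slave_dev)
	{
		int err;

		ASSERT_RTNL();                                /* rtnl must be held */
		err = netdev_set_master(slave_dev, bond_dev); /* sets IFF_SLAVE and
							       * emits RTM_NEWLINK */
		if (err)
			return err;
		/* ... record the slave, clone its MAC, start link monitoring ... */
		return 0;
	}
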
2997 +static void dev_change_rx_flags(struct net_device *dev, int flags)
2999 + if (dev->flags & IFF_UP && dev->change_rx_flags)
3000 + dev->change_rx_flags(dev, flags);
3003 +static int __dev_set_promiscuity(struct net_device *dev, int inc)
3005 + unsigned short old_flags = dev->flags;
3009 + dev->flags |= IFF_PROMISC;
3010 + dev->promiscuity += inc;
3011 + if (dev->promiscuity == 0) {
3014 + * If inc causes overflow, untouch promisc and return error.
3017 + dev->flags &= ~IFF_PROMISC;
3019 + dev->promiscuity -= inc;
3020 + printk(KERN_WARNING "%s: promiscuity touches roof, "
3021 + "set promiscuity failed, promiscuity feature "
3022 + "of device might be broken.\n", dev->name);
3023 + return -EOVERFLOW;
3026 + if (dev->flags != old_flags) {
3027 + printk(KERN_INFO "device %s %s promiscuous mode\n",
3028 + dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3030 + if (audit_enabled)
3031 + audit_log(current->audit_context, GFP_ATOMIC,
3032 + AUDIT_ANOM_PROMISCUOUS,
3033 + "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3034 + dev->name, (dev->flags & IFF_PROMISC),
3035 + (old_flags & IFF_PROMISC),
3036 + audit_get_loginuid(current),
3037 + current->uid, current->gid,
3038 + audit_get_sessionid(current));
3040 + dev_change_rx_flags(dev, IFF_PROMISC);
3046 + * dev_set_promiscuity - update promiscuity count on a device
3050 + * Add or remove promiscuity from a device. While the count in the device
3051 + * remains above zero the interface remains promiscuous. Once it hits zero
3052 + * the device reverts back to normal filtering operation. A negative inc
3053 + * value is used to drop promiscuity on the device.
3054 + * Return 0 if successful or a negative errno code on error.
3056 +int dev_set_promiscuity(struct net_device *dev, int inc)
3058 + unsigned short old_flags = dev->flags;
3061 + err = __dev_set_promiscuity(dev, inc);
3064 + if (dev->flags != old_flags)
3065 + dev_set_rx_mode(dev);
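
A minimal sketch (not from this patch) of the counted interface, from a hypothetical capture module; "eth0" is illustrative, and the +1/-1 pairing is the whole contract:

	static struct net_device *snoop_dev;

	static int __init snoop_init(void)
	{
		int err = -ENODEV;

		rtnl_lock();
		snoop_dev = __dev_get_by_name(&init_net, "eth0");
		if (snoop_dev) {
			dev_hold(snoop_dev);
			err = dev_set_promiscuity(snoop_dev, 1);  /* count++ */
		}
		rtnl_unlock();
		return err;
	}

	static void __exit snoop_exit(void)
	{
		rtnl_lock();
		dev_set_promiscuity(snoop_dev, -1);               /* count-- */
		rtnl_unlock();
		dev_put(snoop_dev);
	}
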
3070 + * dev_set_allmulti - update allmulti count on a device
3074 + * Add or remove reception of all multicast frames on a device. While the
3075 + * count in the device remains above zero the interface remains listening
3076 + * to all multicast frames. Once it hits zero the device reverts back to normal
3077 + * filtering operation. A negative @inc value is used to drop the counter
3078 + * when releasing a resource needing all multicasts.
3079 + * Return 0 if successful or a negative errno code on error.
3082 +int dev_set_allmulti(struct net_device *dev, int inc)
3084 + unsigned short old_flags = dev->flags;
3088 + dev->flags |= IFF_ALLMULTI;
3089 + dev->allmulti += inc;
3090 + if (dev->allmulti == 0) {
3093 + * If inc causes overflow, untouch allmulti and return error.
3096 + dev->flags &= ~IFF_ALLMULTI;
3098 + dev->allmulti -= inc;
3099 + printk(KERN_WARNING "%s: allmulti touches roof, "
3100 + "set allmulti failed, allmulti feature of "
3101 + "device might be broken.\n", dev->name);
3102 + return -EOVERFLOW;
3105 + if (dev->flags ^ old_flags) {
3106 + dev_change_rx_flags(dev, IFF_ALLMULTI);
3107 + dev_set_rx_mode(dev);
3113 + * Upload unicast and multicast address lists to device and
3114 + * configure RX filtering. When the device doesn't support unicast
3115 + * filtering it is put in promiscuous mode while unicast addresses
3116 + * are present.
3118 +void __dev_set_rx_mode(struct net_device *dev)
3120 + /* dev_open will call this function so the list will stay sane. */
3121 + if (!(dev->flags&IFF_UP))
3124 + if (!netif_device_present(dev))
3127 + if (dev->set_rx_mode)
3128 + dev->set_rx_mode(dev);
3130 + /* Unicast addresses changes may only happen under the rtnl,
3131 + * therefore calling __dev_set_promiscuity here is safe.
3133 + if (dev->uc_count > 0 && !dev->uc_promisc) {
3134 + __dev_set_promiscuity(dev, 1);
3135 + dev->uc_promisc = 1;
3136 + } else if (dev->uc_count == 0 && dev->uc_promisc) {
3137 + __dev_set_promiscuity(dev, -1);
3138 + dev->uc_promisc = 0;
3141 + if (dev->set_multicast_list)
3142 + dev->set_multicast_list(dev);
3146 +void dev_set_rx_mode(struct net_device *dev)
3148 + netif_addr_lock_bh(dev);
3149 + __dev_set_rx_mode(dev);
3150 + netif_addr_unlock_bh(dev);
3153 +int __dev_addr_delete(struct dev_addr_list **list, int *count,
3154 + void *addr, int alen, int glbl)
3156 + struct dev_addr_list *da;
3158 + for (; (da = *list) != NULL; list = &da->next) {
3159 + if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3160 + alen == da->da_addrlen) {
3162 + int old_glbl = da->da_gusers;
3163 + da->da_gusers = 0;
3164 + if (old_glbl == 0)
3167 + if (--da->da_users)
3179 +int __dev_addr_add(struct dev_addr_list **list, int *count,
3180 + void *addr, int alen, int glbl)
3182 + struct dev_addr_list *da;
3184 + for (da = *list; da != NULL; da = da->next) {
3185 + if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3186 + da->da_addrlen == alen) {
3188 + int old_glbl = da->da_gusers;
3189 + da->da_gusers = 1;
3198 + da = kzalloc(sizeof(*da), GFP_ATOMIC);
3201 + memcpy(da->da_addr, addr, alen);
3202 + da->da_addrlen = alen;
3204 + da->da_gusers = glbl ? 1 : 0;
3212 + * dev_unicast_delete - Release secondary unicast address.
3214 + * @addr: address to delete
3215 + * @alen: length of @addr
3217 + * Release reference to a secondary unicast address and remove it
3218 + * from the device if the reference count drops to zero.
3220 + * The caller must hold the rtnl_mutex.
3222 +int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3228 + netif_addr_lock_bh(dev);
3229 + err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3231 + __dev_set_rx_mode(dev);
3232 + netif_addr_unlock_bh(dev);
3235 +EXPORT_SYMBOL(dev_unicast_delete);
3238 + * dev_unicast_add - add a secondary unicast address
3240 + * @addr: address to add
3241 + * @alen: length of @addr
3243 + * Add a secondary unicast address to the device or increase
3244 + * the reference count if it already exists.
3246 + * The caller must hold the rtnl_mutex.
3248 +int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3254 + netif_addr_lock_bh(dev);
3255 + err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3257 + __dev_set_rx_mode(dev);
3258 + netif_addr_unlock_bh(dev);
3261 +EXPORT_SYMBOL(dev_unicast_add);
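
Sketch of the intended use: a virtual device that wants frames for an extra MAC pushes it onto the lower device's secondary unicast list. The address and device are illustrative; the caller holds rtnl.

	static const u8 extra_mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

	static int listen_on_extra_mac(struct net_device *lower)
	{
		ASSERT_RTNL();
		/* refcounted: the first add may flip the lower device into
		 * promiscuous mode via __dev_set_rx_mode() if it cannot
		 * filter the address in hardware */
		return dev_unicast_add(lower, (void *)extra_mac, ETH_ALEN);
	}

	static void stop_listening(struct net_device *lower)
	{
		ASSERT_RTNL();
		dev_unicast_delete(lower, (void *)extra_mac, ETH_ALEN);
	}
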
3263 +int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3264 + struct dev_addr_list **from, int *from_count)
3266 + struct dev_addr_list *da, *next;
3270 + while (da != NULL) {
3272 + if (!da->da_synced) {
3273 + err = __dev_addr_add(to, to_count,
3274 + da->da_addr, da->da_addrlen, 0);
3277 + da->da_synced = 1;
3279 + } else if (da->da_users == 1) {
3280 + __dev_addr_delete(to, to_count,
3281 + da->da_addr, da->da_addrlen, 0);
3282 + __dev_addr_delete(from, from_count,
3283 + da->da_addr, da->da_addrlen, 0);
3290 +void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3291 + struct dev_addr_list **from, int *from_count)
3293 + struct dev_addr_list *da, *next;
3296 + while (da != NULL) {
3298 + if (da->da_synced) {
3299 + __dev_addr_delete(to, to_count,
3300 + da->da_addr, da->da_addrlen, 0);
3301 + da->da_synced = 0;
3302 + __dev_addr_delete(from, from_count,
3303 + da->da_addr, da->da_addrlen, 0);
3310 + * dev_unicast_sync - Synchronize device's unicast list to another device
3311 + * @to: destination device
3312 + * @from: source device
3314 + * Add newly added addresses to the destination device and release
3315 + * addresses that have no users left. The source device must be
3316 + * locked by netif_addr_lock_bh.
3318 + * This function is intended to be called from the dev->set_rx_mode
3319 + * function of layered software devices.
3321 +int dev_unicast_sync(struct net_device *to, struct net_device *from)
3325 + netif_addr_lock_bh(to);
3326 + err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3327 + &from->uc_list, &from->uc_count);
3329 + __dev_set_rx_mode(to);
3330 + netif_addr_unlock_bh(to);
3333 +EXPORT_SYMBOL(dev_unicast_sync);
3336 + * dev_unicast_unsync - Remove synchronized addresses from the destination device
3337 + * @to: destination device
3338 + * @from: source device
3340 + * Remove all addresses that were added to the destination device by
3341 + * dev_unicast_sync(). This function is intended to be called from the
3342 + * dev->stop function of layered software devices.
3344 +void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3346 + netif_addr_lock_bh(from);
3347 + netif_addr_lock(to);
3349 + __dev_addr_unsync(&to->uc_list, &to->uc_count,
3350 + &from->uc_list, &from->uc_count);
3351 + __dev_set_rx_mode(to);
3353 + netif_addr_unlock(to);
3354 + netif_addr_unlock_bh(from);
3356 +EXPORT_SYMBOL(dev_unicast_unsync);
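
The layered-device pattern described above, roughly as 8021q uses it in this kernel: sync in ->set_rx_mode, unsync in ->stop. The upper/lower names and accessor are hypothetical.

	static void upper_set_rx_mode(struct net_device *upper)
	{
		struct net_device *lower = upper_get_lower(upper);  /* hypothetical */

		dev_unicast_sync(lower, upper);    /* add new, drop unreferenced */
	}

	static int upper_stop(struct net_device *upper)
	{
		struct net_device *lower = upper_get_lower(upper);  /* hypothetical */

		dev_unicast_unsync(lower, upper);  /* remove everything we synced */
		return 0;
	}
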
3358 +static void __dev_addr_discard(struct dev_addr_list **list)
3360 + struct dev_addr_list *tmp;
3362 + while (*list != NULL) {
3364 + *list = tmp->next;
3365 + if (tmp->da_users > tmp->da_gusers)
3366 + printk("__dev_addr_discard: address leakage! "
3367 + "da_users=%d\n", tmp->da_users);
3372 +static void dev_addr_discard(struct net_device *dev)
3374 + netif_addr_lock_bh(dev);
3376 + __dev_addr_discard(&dev->uc_list);
3377 + dev->uc_count = 0;
3379 + __dev_addr_discard(&dev->mc_list);
3380 + dev->mc_count = 0;
3382 + netif_addr_unlock_bh(dev);
3385 +unsigned dev_get_flags(const struct net_device *dev)
3389 + flags = (dev->flags & ~(IFF_PROMISC |
3394 + (dev->gflags & (IFF_PROMISC |
3397 + if (netif_running(dev)) {
3398 + if (netif_oper_up(dev))
3399 + flags |= IFF_RUNNING;
3400 + if (netif_carrier_ok(dev))
3401 + flags |= IFF_LOWER_UP;
3402 + if (netif_dormant(dev))
3403 + flags |= IFF_DORMANT;
3409 +int dev_change_flags(struct net_device *dev, unsigned flags)
3412 + int old_flags = dev->flags;
3417 + * Set the flags on our device.
3420 + dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3421 + IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3423 + (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3427 + * Load in the correct multicast list now the flags have changed.
3430 + if ((old_flags ^ flags) & IFF_MULTICAST)
3431 + dev_change_rx_flags(dev, IFF_MULTICAST);
3433 + dev_set_rx_mode(dev);
3436 + * Have we downed the interface? We handle IFF_UP ourselves
3437 + * according to user attempts to set it, rather than blindly
3438 + * setting it.
3442 + if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3443 + ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3446 + dev_set_rx_mode(dev);
3449 + if (dev->flags & IFF_UP &&
3450 + ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3452 + call_netdevice_notifiers(NETDEV_CHANGE, dev);
3454 + if ((flags ^ dev->gflags) & IFF_PROMISC) {
3455 + int inc = (flags & IFF_PROMISC) ? +1 : -1;
3456 + dev->gflags ^= IFF_PROMISC;
3457 + dev_set_promiscuity(dev, inc);
3460 + /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3461 + is important. Some (broken) drivers set IFF_PROMISC when
3462 + IFF_ALLMULTI is requested, without asking us and without reporting.
3464 + if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3465 + int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3466 + dev->gflags ^= IFF_ALLMULTI;
3467 + dev_set_allmulti(dev, inc);
3470 + /* Exclude state transition flags, already notified */
3471 + changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3473 + rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
3478 +int dev_set_mtu(struct net_device *dev, int new_mtu)
3482 + if (new_mtu == dev->mtu)
3485 + /* MTU must be positive. */
3489 + if (!netif_device_present(dev))
3493 + if (dev->change_mtu)
3494 + err = dev->change_mtu(dev, new_mtu);
3496 + dev->mtu = new_mtu;
3497 + if (!err && dev->flags & IFF_UP)
3498 + call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
3502 +int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3506 + if (!dev->set_mac_address)
3507 + return -EOPNOTSUPP;
3508 + if (sa->sa_family != dev->type)
3510 + if (!netif_device_present(dev))
3512 + err = dev->set_mac_address(dev, sa);
3514 + call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3519 + * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
3521 +static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
3524 + struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3530 + case SIOCGIFFLAGS: /* Get interface flags */
3531 + ifr->ifr_flags = dev_get_flags(dev);
3534 + case SIOCGIFMETRIC: /* Get the metric on the interface
3535 + (currently unused) */
3536 + ifr->ifr_metric = 0;
3539 + case SIOCGIFMTU: /* Get the MTU of a device */
3540 + ifr->ifr_mtu = dev->mtu;
3543 + case SIOCGIFHWADDR:
3544 + if (!dev->addr_len)
3545 + memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3547 + memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3548 + min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3549 + ifr->ifr_hwaddr.sa_family = dev->type;
3552 + case SIOCGIFSLAVE:
3557 + ifr->ifr_map.mem_start = dev->mem_start;
3558 + ifr->ifr_map.mem_end = dev->mem_end;
3559 + ifr->ifr_map.base_addr = dev->base_addr;
3560 + ifr->ifr_map.irq = dev->irq;
3561 + ifr->ifr_map.dma = dev->dma;
3562 + ifr->ifr_map.port = dev->if_port;
3565 + case SIOCGIFINDEX:
3566 + ifr->ifr_ifindex = dev->ifindex;
3569 + case SIOCGIFTXQLEN:
3570 + ifr->ifr_qlen = dev->tx_queue_len;
3574 + /* dev_ioctl() should ensure this case
3575 + * is never reached
3586 + * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3588 +static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3591 + struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3597 + case SIOCSIFFLAGS: /* Set interface flags */
3598 + return dev_change_flags(dev, ifr->ifr_flags);
3600 + case SIOCSIFMETRIC: /* Set the metric on the interface
3601 + (currently unused) */
3602 + return -EOPNOTSUPP;
3604 + case SIOCSIFMTU: /* Set the MTU of a device */
3605 + return dev_set_mtu(dev, ifr->ifr_mtu);
3607 + case SIOCSIFHWADDR:
3608 + return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3610 + case SIOCSIFHWBROADCAST:
3611 + if (ifr->ifr_hwaddr.sa_family != dev->type)
3613 + memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3614 + min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3615 + call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3619 + if (dev->set_config) {
3620 + if (!netif_device_present(dev))
3622 + return dev->set_config(dev, &ifr->ifr_map);
3624 + return -EOPNOTSUPP;
3626 + case SIOCADDMULTI:
3627 + if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
3628 + ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3630 + if (!netif_device_present(dev))
3632 + return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3633 + dev->addr_len, 1);
3635 + case SIOCDELMULTI:
3636 + if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
3637 + ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3639 + if (!netif_device_present(dev))
3641 + return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3642 + dev->addr_len, 1);
3644 + case SIOCSIFTXQLEN:
3645 + if (ifr->ifr_qlen < 0)
3647 + dev->tx_queue_len = ifr->ifr_qlen;
3651 + ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3652 + return dev_change_name(dev, ifr->ifr_newname);
3655 + * Unknown or private ioctl
3659 + if ((cmd >= SIOCDEVPRIVATE &&
3660 + cmd <= SIOCDEVPRIVATE + 15) ||
3661 + cmd == SIOCBONDENSLAVE ||
3662 + cmd == SIOCBONDRELEASE ||
3663 + cmd == SIOCBONDSETHWADDR ||
3664 + cmd == SIOCBONDSLAVEINFOQUERY ||
3665 + cmd == SIOCBONDINFOQUERY ||
3666 + cmd == SIOCBONDCHANGEACTIVE ||
3667 + cmd == SIOCGMIIPHY ||
3668 + cmd == SIOCGMIIREG ||
3669 + cmd == SIOCSMIIREG ||
3670 + cmd == SIOCBRADDIF ||
3671 + cmd == SIOCBRDELIF ||
3672 + cmd == SIOCWANDEV) {
3673 + err = -EOPNOTSUPP;
3674 + if (dev->do_ioctl) {
3675 + if (netif_device_present(dev))
3676 + err = dev->do_ioctl(dev, ifr,
3689 + * This function handles all "interface"-type I/O control requests. The actual
3690 + * 'doing' part of this is dev_ifsioc above.
3694 + * dev_ioctl - network device ioctl
3695 + * @net: the applicable net namespace
3696 + * @cmd: command to issue
3697 + * @arg: pointer to a struct ifreq in user space
3699 + * Issue ioctl functions to devices. This is normally called by the
3700 + * user space syscall interfaces but can sometimes be useful for
3701 + * other purposes. The return value is the return from the syscall if
3702 + * positive or a negative errno code on error.
3705 +int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3711 + /* One special case: SIOCGIFCONF takes ifconf argument
3712 + and requires shared lock, because it sleeps writing
3713 + to user space.
3716 + if (cmd == SIOCGIFCONF) {
3718 + ret = dev_ifconf(net, (char __user *) arg);
3722 + if (cmd == SIOCGIFNAME)
3723 + return dev_ifname(net, (struct ifreq __user *)arg);
3725 + if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3728 + ifr.ifr_name[IFNAMSIZ-1] = 0;
3730 + colon = strchr(ifr.ifr_name, ':');
3735 + * See which interface the caller is talking about.
3740 + * These ioctl calls:
3741 + * - can be done by all.
3742 + * - atomic and do not require locking.
3743 + * - return a value
3745 + case SIOCGIFFLAGS:
3746 + case SIOCGIFMETRIC:
3748 + case SIOCGIFHWADDR:
3749 + case SIOCGIFSLAVE:
3751 + case SIOCGIFINDEX:
3752 + case SIOCGIFTXQLEN:
3753 + dev_load(net, ifr.ifr_name);
3754 + read_lock(&dev_base_lock);
3755 + ret = dev_ifsioc_locked(net, &ifr, cmd);
3756 + read_unlock(&dev_base_lock);
3760 + if (copy_to_user(arg, &ifr,
3761 + sizeof(struct ifreq)))
3767 + dev_load(net, ifr.ifr_name);
3769 + ret = dev_ethtool(net, &ifr);
3774 + if (copy_to_user(arg, &ifr,
3775 + sizeof(struct ifreq)))
3781 + * These ioctl calls:
3782 + * - require superuser power.
3783 + * - require strict serialization.
3784 + * - return a value
3789 + if (!capable(CAP_NET_ADMIN))
3791 + dev_load(net, ifr.ifr_name);
3793 + ret = dev_ifsioc(net, &ifr, cmd);
3798 + if (copy_to_user(arg, &ifr,
3799 + sizeof(struct ifreq)))
3805 + * These ioctl calls:
3806 + * - require superuser power.
3807 + * - require strict serialization.
3808 + * - do not return a value
3810 + case SIOCSIFFLAGS:
3811 + case SIOCSIFMETRIC:
3814 + case SIOCSIFHWADDR:
3815 + case SIOCSIFSLAVE:
3816 + case SIOCADDMULTI:
3817 + case SIOCDELMULTI:
3818 + case SIOCSIFHWBROADCAST:
3819 + case SIOCSIFTXQLEN:
3821 + case SIOCBONDENSLAVE:
3822 + case SIOCBONDRELEASE:
3823 + case SIOCBONDSETHWADDR:
3824 + case SIOCBONDCHANGEACTIVE:
3827 + if (!capable(CAP_NET_ADMIN))
3829 + /* fall through */
3830 + case SIOCBONDSLAVEINFOQUERY:
3831 + case SIOCBONDINFOQUERY:
3832 + dev_load(net, ifr.ifr_name);
3834 + ret = dev_ifsioc(net, &ifr, cmd);
3839 + /* Get the per device memory space. We can add this but
3840 + * currently do not support it */
3842 + /* Set the per device memory buffer space.
3843 + * Not applicable in our case */
3848 + * Unknown or private ioctl.
3851 + if (cmd == SIOCWANDEV ||
3852 + (cmd >= SIOCDEVPRIVATE &&
3853 + cmd <= SIOCDEVPRIVATE + 15)) {
3854 + dev_load(net, ifr.ifr_name);
3856 + ret = dev_ifsioc(net, &ifr, cmd);
3858 + if (!ret && copy_to_user(arg, &ifr,
3859 + sizeof(struct ifreq)))
3863 + /* Take care of Wireless Extensions */
3864 + if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
3865 + return wext_handle_ioctl(net, &ifr, cmd, arg);
3872 + * dev_new_index - allocate an ifindex
3873 + * @net: the applicable net namespace
3875 + * Returns a suitable unique value for a new device interface
3876 + * number. The caller must hold the rtnl semaphore or the
3877 + * dev_base_lock to be sure it remains unique.
3879 +static int dev_new_index(struct net *net)
3881 + static int ifindex;
3883 + if (++ifindex <= 0)
3885 + if (!__dev_get_by_index(net, ifindex))
3890 +/* Delayed registration/unregistration */
3891 +static LIST_HEAD(net_todo_list);
3893 +static void net_set_todo(struct net_device *dev)
3895 + list_add_tail(&dev->todo_list, &net_todo_list);
3898 +static void rollback_registered(struct net_device *dev)
3900 + BUG_ON(dev_boot_phase);
3903 + /* Some devices call without registering for initialization unwind. */
3904 + if (dev->reg_state == NETREG_UNINITIALIZED) {
3905 + printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3906 + "was registered\n", dev->name, dev);
3912 + BUG_ON(dev->reg_state != NETREG_REGISTERED);
3914 + /* If device is running, close it first. */
3917 + /* And unlink it from device chain. */
3918 + unlist_netdevice(dev);
3920 + dev->reg_state = NETREG_UNREGISTERING;
3922 + synchronize_net();
3924 + /* Shutdown queueing discipline. */
3925 + dev_shutdown(dev);
3928 + /* Notify protocols, that we are about to destroy
3929 + this device. They should clean all the things.
3931 + call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3934 + * Flush the unicast and multicast chains
3936 + dev_addr_discard(dev);
3941 + /* Notifier chain MUST detach us from master device. */
3942 + WARN_ON(dev->master);
3944 + /* Remove entries from kobject tree */
3945 + netdev_unregister_kobject(dev);
3947 + synchronize_net();
3952 +static void __netdev_init_queue_locks_one(struct net_device *dev,
3953 + struct netdev_queue *dev_queue,
3956 + spin_lock_init(&dev_queue->_xmit_lock);
3957 + netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
3958 + dev_queue->xmit_lock_owner = -1;
3961 +static void netdev_init_queue_locks(struct net_device *dev)
3963 + netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
3964 + __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
3968 + * register_netdevice - register a network device
3969 + * @dev: device to register
3971 + * Take a completed network device structure and add it to the kernel
3972 + * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3973 + * chain. 0 is returned on success. A negative errno code is returned
3974 + * on a failure to set up the device, or if the name is a duplicate.
3976 + * Callers must hold the rtnl semaphore. You may want
3977 + * register_netdev() instead of this.
3980 + * The locking appears insufficient to guarantee two parallel registers
3981 + * will not get the same name.
3984 +int register_netdevice(struct net_device *dev)
3986 + struct hlist_head *head;
3987 + struct hlist_node *p;
3991 + BUG_ON(dev_boot_phase);
3996 + /* When net_device's are persistent, this will be fatal. */
3997 + BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
3998 + BUG_ON(!dev_net(dev));
3999 + net = dev_net(dev);
4001 + spin_lock_init(&dev->addr_list_lock);
4002 + netdev_set_addr_lockdep_class(dev);
4003 + netdev_init_queue_locks(dev);
4007 + /* Init, if this function is available */
4009 + ret = dev->init(dev);
4017 + if (!dev_valid_name(dev->name)) {
4022 + dev->ifindex = dev_new_index(net);
4023 + if (dev->iflink == -1)
4024 + dev->iflink = dev->ifindex;
4026 + /* Check for existence of name */
4027 + head = dev_name_hash(net, dev->name);
4028 + hlist_for_each(p, head) {
4029 + struct net_device *d
4030 + = hlist_entry(p, struct net_device, name_hlist);
4031 + if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4037 + /* Fix illegal checksum combinations */
4038 + if ((dev->features & NETIF_F_HW_CSUM) &&
4039 + (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4040 + printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4042 + dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4045 + if ((dev->features & NETIF_F_NO_CSUM) &&
4046 + (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4047 + printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4049 + dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4053 + /* Fix illegal SG+CSUM combinations. */
4054 + if ((dev->features & NETIF_F_SG) &&
4055 + !(dev->features & NETIF_F_ALL_CSUM)) {
4056 + printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
4058 + dev->features &= ~NETIF_F_SG;
4061 + /* TSO requires that SG is present as well. */
4062 + if ((dev->features & NETIF_F_TSO) &&
4063 + !(dev->features & NETIF_F_SG)) {
4064 + printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
4066 + dev->features &= ~NETIF_F_TSO;
4068 + if (dev->features & NETIF_F_UFO) {
4069 + if (!(dev->features & NETIF_F_GEN_CSUM)) {
4070 + printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
4071 + "NETIF_F_HW_CSUM feature.\n",
4073 + dev->features &= ~NETIF_F_UFO;
4075 + if (!(dev->features & NETIF_F_SG)) {
4076 + printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
4077 + "NETIF_F_SG feature.\n",
4079 + dev->features &= ~NETIF_F_UFO;
4083 + /* Enable software GSO if SG is supported. */
4084 + if (dev->features & NETIF_F_SG)
4085 + dev->features |= NETIF_F_GSO;
4087 + netdev_initialize_kobject(dev);
4088 + ret = netdev_register_kobject(dev);
4091 + dev->reg_state = NETREG_REGISTERED;
4094 + * Default initial state at registry is that the
4095 + * device is present.
4098 + set_bit(__LINK_STATE_PRESENT, &dev->state);
4100 + dev_init_scheduler(dev);
4102 + list_netdevice(dev);
4104 + /* Notify protocols, that a new device appeared. */
4105 + ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4106 + ret = notifier_to_errno(ret);
4108 + rollback_registered(dev);
4109 + dev->reg_state = NETREG_UNREGISTERED;
4122 + * register_netdev - register a network device
4123 + * @dev: device to register
4125 + * Take a completed network device structure and add it to the kernel
4126 + * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4127 + * chain. 0 is returned on success. A negative errno code is returned
4128 + * on a failure to set up the device, or if the name is a duplicate.
4130 + * This is a wrapper around register_netdevice that takes the rtnl semaphore
4131 + * and expands the device name if you passed a format string to
4134 +int register_netdev(struct net_device *dev)
4141 + * If the name is a format string the caller wants us to do a
4142 + * name allocation.
4144 + if (strchr(dev->name, '%')) {
4145 + err = dev_alloc_name(dev, dev->name);
4150 + err = register_netdevice(dev);
4155 +EXPORT_SYMBOL(register_netdev);
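
The usual probe-time shape around this call, sketched for a pre-netdev_ops (2.6.27-era) driver; all mydrv_* names are hypothetical:

	struct mydrv_priv { int dummy; };   /* placeholder private area */

	static int mydrv_probe(void)
	{
		struct net_device *dev;
		int err;

		dev = alloc_netdev(sizeof(struct mydrv_priv), "eth%d", ether_setup);
		if (!dev)
			return -ENOMEM;

		dev->open            = mydrv_open;        /* hypothetical ops */
		dev->stop            = mydrv_stop;
		dev->hard_start_xmit = mydrv_start_xmit;

		err = register_netdev(dev);   /* takes rtnl, expands "eth%d" */
		if (err)
			free_netdev(dev);     /* still NETREG_UNINITIALIZED here */
		return err;
	}
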
4158 + * netdev_wait_allrefs - wait until all references are gone.
4160 + * This is called when unregistering network devices.
4162 + * Any protocol or device that holds a reference should register
4163 + * for netdevice notification, and cleanup and put back the
4164 + * reference if they receive an UNREGISTER event.
4165 + * We can get stuck here if buggy protocols don't correctly
4166 + * call dev_put.
4168 +static void netdev_wait_allrefs(struct net_device *dev)
4170 + unsigned long rebroadcast_time, warning_time;
4172 + rebroadcast_time = warning_time = jiffies;
4173 + while (atomic_read(&dev->refcnt) != 0) {
4174 + if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
4177 + /* Rebroadcast unregister notification */
4178 + call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4180 + if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4182 + /* We must not have linkwatch events
4183 + * pending on unregister. If this
4184 + * happens, we simply run the queue
4185 + * unscheduled, resulting in a noop
4186 + * for this device.
4188 + linkwatch_run_queue();
4193 + rebroadcast_time = jiffies;
4198 + if (time_after(jiffies, warning_time + 10 * HZ)) {
4199 + printk(KERN_EMERG "unregister_netdevice: "
4200 + "waiting for %s to become free. Usage "
4202 + dev->name, atomic_read(&dev->refcnt));
4203 + warning_time = jiffies;
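
What "register for netdevice notification and put back the reference" means in practice: any module that caches a dev_hold() reference must drop it on NETDEV_UNREGISTER, or the loop above spins, re-broadcasting every second and warning every ten. A hedged sketch:

	static struct net_device *cached_dev;  /* dev_hold() taken elsewhere */

	static int my_netdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
	{
		struct net_device *dev = ptr;  /* in 2.6.27, ptr is the net_device */

		if (event == NETDEV_UNREGISTER && dev == cached_dev) {
			cached_dev = NULL;
			dev_put(dev);          /* let the refcnt reach zero */
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block my_netdev_nb = {
		.notifier_call = my_netdev_event,
	};
	/* registered once with register_netdevice_notifier(&my_netdev_nb) */
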
4208 +/* The sequence is:
4212 + * register_netdevice(x1);
4213 + * register_netdevice(x2);
4215 + * unregister_netdevice(y1);
4216 + * unregister_netdevice(y2);
4219 + * free_netdev(y1);
4220 + * free_netdev(y2);
4222 + * We are invoked by rtnl_unlock().
4223 + * This allows us to deal with problems:
4224 + * 1) We can delete sysfs objects which invoke hotplug
4225 + * without deadlocking with linkwatch via keventd.
4226 + * 2) Since we run with the RTNL semaphore not held, we can sleep
4227 + * safely in order to wait for the netdev refcnt to drop to zero.
4229 + * We must not return until all unregister events added during
4230 + * the interval the lock was held have been completed.
4232 +void netdev_run_todo(void)
4234 + struct list_head list;
4236 + /* Snapshot list, allow later requests */
4237 + list_replace_init(&net_todo_list, &list);
4241 + while (!list_empty(&list)) {
4242 + struct net_device *dev
4243 + = list_entry(list.next, struct net_device, todo_list);
4244 + list_del(&dev->todo_list);
4246 + if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
4247 + printk(KERN_ERR "network todo '%s' but state %d\n",
4248 + dev->name, dev->reg_state);
4253 + dev->reg_state = NETREG_UNREGISTERED;
4255 + on_each_cpu(flush_backlog, dev, 1);
4257 + netdev_wait_allrefs(dev);
4260 + BUG_ON(atomic_read(&dev->refcnt));
4261 + WARN_ON(dev->ip_ptr);
4262 + WARN_ON(dev->ip6_ptr);
4263 + WARN_ON(dev->dn_ptr);
4265 + if (dev->destructor)
4266 + dev->destructor(dev);
4268 + /* Free network device */
4269 + kobject_put(&dev->dev.kobj);
4273 +static struct net_device_stats *internal_stats(struct net_device *dev)
4275 + return &dev->stats;
4278 +static void netdev_init_one_queue(struct net_device *dev,
4279 + struct netdev_queue *queue,
4285 +static void netdev_init_queues(struct net_device *dev)
4287 + netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4288 + netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
4289 + spin_lock_init(&dev->tx_global_lock);
4293 + * alloc_netdev_mq - allocate network device
4294 + * @sizeof_priv: size of private data to allocate space for
4295 + * @name: device name format string
4296 + * @setup: callback to initialize device
4297 + * @queue_count: the number of subqueues to allocate
4299 + * Allocates a struct net_device with private data area for driver use
4300 + * and performs basic initialization. Also allocates subqueue structs
4301 + * for each queue on the device at the end of the netdevice.
4303 +struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4304 + void (*setup)(struct net_device *), unsigned int queue_count)
4306 + struct netdev_queue *tx;
4307 + struct net_device *dev;
4308 + size_t alloc_size;
4311 + BUG_ON(strlen(name) >= sizeof(dev->name));
4313 + alloc_size = sizeof(struct net_device);
4314 + if (sizeof_priv) {
4315 + /* ensure 32-byte alignment of private area */
4316 + alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4317 + alloc_size += sizeof_priv;
4319 + /* ensure 32-byte alignment of whole construct */
4320 + alloc_size += NETDEV_ALIGN_CONST;
4322 + p = kzalloc(alloc_size, GFP_KERNEL);
4324 + printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
4328 + tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
4330 + printk(KERN_ERR "alloc_netdev: Unable to allocate "
4336 + dev = (struct net_device *)
4337 + (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4338 + dev->padded = (char *)dev - (char *)p;
4339 + dev_net_set(dev, &init_net);
4342 + dev->num_tx_queues = queue_count;
4343 + dev->real_num_tx_queues = queue_count;
4345 + if (sizeof_priv) {
4346 + dev->priv = ((char *)dev +
4347 + ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
4348 + & ~NETDEV_ALIGN_CONST));
4351 + dev->gso_max_size = GSO_MAX_SIZE;
4353 + netdev_init_queues(dev);
4355 + dev->get_stats = internal_stats;
4356 + netpoll_netdev_init(dev);
4358 + strcpy(dev->name, name);
4361 +EXPORT_SYMBOL(alloc_netdev_mq);
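
A multiqueue allocation sketch (8 TX rings; mynic_priv as in the earlier NAPI sketches): the private area comes back via netdev_priv() at the 32-byte-aligned tail described above.

	static struct net_device *mynic_alloc(void)
	{
		struct net_device *dev;

		dev = alloc_netdev_mq(sizeof(struct mynic_priv), "eth%d",
				      ether_setup, 8);
		if (!dev)
			return NULL;
		/* real_num_tx_queues may later be lowered below
		 * num_tx_queues, never raised */
		dev->real_num_tx_queues = 4;
		return dev;
	}
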
4364 + * free_netdev - free network device
4367 + * This function does the last stage of destroying an allocated device
4368 + * interface. The reference to the device object is released.
4369 + * If this is the last reference then it will be freed.
4371 +void free_netdev(struct net_device *dev)
4373 + release_net(dev_net(dev));
4377 + /* Compatibility with error handling in drivers */
4378 + if (dev->reg_state == NETREG_UNINITIALIZED) {
4379 + kfree((char *)dev - dev->padded);
4383 + BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4384 + dev->reg_state = NETREG_RELEASED;
4386 + /* will free via device release */
4387 + put_device(&dev->dev);
4390 +/* Synchronize with packet receive processing. */
4391 +void synchronize_net(void)
4394 + synchronize_rcu();
4398 + * unregister_netdevice - remove device from the kernel
4401 + * This function shuts down a device interface and removes it
4402 + * from the kernel tables.
4404 + * Callers must hold the rtnl semaphore. You may want
4405 + * unregister_netdev() instead of this.
4408 +void unregister_netdevice(struct net_device *dev)
4412 + rollback_registered(dev);
4413 + /* Finish processing unregister after unlock */
4414 + net_set_todo(dev);
4418 + * unregister_netdev - remove device from the kernel
4421 + * This function shuts down a device interface and removes it
4422 + * from the kernel tables.
4424 + * This is just a wrapper for unregister_netdevice that takes
4425 + * the rtnl semaphore. In general you want to use this and not
4426 + * unregister_netdevice.
4428 +void unregister_netdev(struct net_device *dev)
4431 + unregister_netdevice(dev);
4435 +EXPORT_SYMBOL(unregister_netdev);
4438 + * dev_change_net_namespace - move device to a different network namespace
4440 + * @net: network namespace
4441 + * @pat: If not NULL name pattern to try if the current device name
4442 + * is already taken in the destination network namespace.
4444 + * This function shuts down a device interface and moves it
4445 + * to a new network namespace. On success 0 is returned, on
4446 + * a failure a negative errno code is returned.
4448 + * Callers must hold the rtnl semaphore.
4451 +int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4453 + char buf[IFNAMSIZ];
4454 + const char *destname;
4459 + /* Don't allow namespace local devices to be moved. */
4461 + if (dev->features & NETIF_F_NETNS_LOCAL)
4464 +#ifdef CONFIG_SYSFS
4465 + /* Don't allow real devices to be moved when sysfs
4469 + if (dev->dev.parent)
4473 + /* Ensure the device has been registered */
4475 + if (dev->reg_state != NETREG_REGISTERED)
4478 + /* Get out if there is nothing to do */
4480 + if (net_eq(dev_net(dev), net))
4483 + /* Pick the destination device name, and ensure
4484 + * we can use it in the destination network namespace.
4487 + destname = dev->name;
4488 + if (__dev_get_by_name(net, destname)) {
4489 + /* We get here if we can't use the current device name */
4492 + if (!dev_valid_name(pat))
4494 + if (strchr(pat, '%')) {
4495 + if (__dev_alloc_name(net, pat, buf) < 0)
4500 + if (__dev_get_by_name(net, destname))
4505 + * And now a mini version of register_netdevice and unregister_netdevice.
4508 + /* If device is running close it first. */
4511 + /* And unlink it from device chain */
4513 + unlist_netdevice(dev);
4515 + synchronize_net();
4517 + /* Shutdown queueing discipline. */
4518 + dev_shutdown(dev);
4520 + /* Notify protocols, that we are about to destroy
4521 + this device. They should clean all the things.
4523 + call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4526 + * Flush the unicast and multicast chains
4528 + dev_addr_discard(dev);
4530 + netdev_unregister_kobject(dev);
4532 + /* Actually switch the network namespace */
4533 + dev_net_set(dev, net);
4535 + /* Assign the new device name */
4536 + if (destname != dev->name)
4537 + strcpy(dev->name, destname);
4539 + /* If there is an ifindex conflict assign a new one */
4540 + if (__dev_get_by_index(net, dev->ifindex)) {
4541 + int iflink = (dev->iflink == dev->ifindex);
4542 + dev->ifindex = dev_new_index(net);
4544 + dev->iflink = dev->ifindex;
4547 + /* Fixup kobjects */
4548 + err = netdev_register_kobject(dev);
4551 + /* Add the device back in the hashes */
4552 + list_netdevice(dev);
4554 + /* Notify protocols, that a new device appeared. */
4555 + call_netdevice_notifiers(NETDEV_REGISTER, dev);
4557 + synchronize_net();
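
A kernel-side usage sketch, assuming the caller already holds a valid struct net reference (how it obtains one is out of scope here):

	static int move_dev_to_ns(struct net_device *dev, struct net *target)
	{
		int err;

		rtnl_lock();
		/* "dev%d" resolves name collisions in the target namespace */
		err = dev_change_net_namespace(dev, target, "dev%d");
		rtnl_unlock();
		return err;
	}
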
4563 +static int dev_cpu_callback(struct notifier_block *nfb,
4564 + unsigned long action,
4567 + struct sk_buff **list_skb;
4568 + struct Qdisc **list_net;
4569 + struct sk_buff *skb;
4570 + unsigned int cpu, oldcpu = (unsigned long)ocpu;
4571 + struct softnet_data *sd, *oldsd;
4573 + if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
4576 + local_irq_disable();
4577 + cpu = smp_processor_id();
4578 + sd = &per_cpu(softnet_data, cpu);
4579 + oldsd = &per_cpu(softnet_data, oldcpu);
4581 + /* Find end of our completion_queue. */
4582 + list_skb = &sd->completion_queue;
4584 + list_skb = &(*list_skb)->next;
4585 + /* Append completion queue from offline CPU. */
4586 + *list_skb = oldsd->completion_queue;
4587 + oldsd->completion_queue = NULL;
4589 + /* Find end of our output_queue. */
4590 + list_net = &sd->output_queue;
4592 + list_net = &(*list_net)->next_sched;
4593 + /* Append output queue from offline CPU. */
4594 + *list_net = oldsd->output_queue;
4595 + oldsd->output_queue = NULL;
4597 + raise_softirq_irqoff(NET_TX_SOFTIRQ);
4598 + local_irq_enable();
4600 + /* Process offline CPU's input_pkt_queue */
4601 + while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4607 +#ifdef CONFIG_NET_DMA
4609 + * net_dma_rebalance - try to maintain one DMA channel per CPU
4610 + * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4612 + * This is called when the number of channels allocated to the net_dma client
4613 + * changes. The net_dma client tries to have one DMA channel per CPU.
4616 +static void net_dma_rebalance(struct net_dma *net_dma)
4618 + unsigned int cpu, i, n, chan_idx;
4619 + struct dma_chan *chan;
4621 + if (cpus_empty(net_dma->channel_mask)) {
4622 + for_each_online_cpu(cpu)
4623 + rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
4628 + cpu = first_cpu(cpu_online_map);
4630 + for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
4631 + chan = net_dma->channels[chan_idx];
4633 + n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4634 + + (i < (num_online_cpus() %
4635 + cpus_weight(net_dma->channel_mask)) ? 1 : 0));
4638 + per_cpu(softnet_data, cpu).net_dma = chan;
4639 + cpu = next_cpu(cpu, cpu_online_map);
4647 + * netdev_dma_event - event callback for the net_dma_client
4648 + * @client: should always be net_dma_client
4649 + * @chan: DMA channel for the event
4650 + * @state: DMA state to be handled
4652 +static enum dma_state_client
4653 +netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4654 + enum dma_state state)
4656 + int i, found = 0, pos = -1;
4657 + struct net_dma *net_dma =
4658 + container_of(client, struct net_dma, client);
4659 + enum dma_state_client ack = DMA_DUP; /* default: take no action */
4661 + spin_lock(&net_dma->lock);
4663 + case DMA_RESOURCE_AVAILABLE:
4664 + for (i = 0; i < nr_cpu_ids; i++)
4665 + if (net_dma->channels[i] == chan) {
4668 + } else if (net_dma->channels[i] == NULL && pos < 0)
4671 + if (!found && pos >= 0) {
4673 + net_dma->channels[pos] = chan;
4674 + cpu_set(pos, net_dma->channel_mask);
4675 + net_dma_rebalance(net_dma);
4678 + case DMA_RESOURCE_REMOVED:
4679 + for (i = 0; i < nr_cpu_ids; i++)
4680 + if (net_dma->channels[i] == chan) {
4688 + cpu_clear(pos, net_dma->channel_mask);
4689 + net_dma->channels[i] = NULL;
4690 + net_dma_rebalance(net_dma);
4696 + spin_unlock(&net_dma->lock);
4702 + * netdev_dma_register - register the networking subsystem as a DMA client
4704 +static int __init netdev_dma_register(void)
4706 + net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
4708 + if (unlikely(!net_dma.channels)) {
4709 + printk(KERN_NOTICE
4710 + "netdev_dma: no memory for net_dma.channels\n");
4713 + spin_lock_init(&net_dma.lock);
4714 + dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4715 + dma_async_client_register(&net_dma.client);
4716 + dma_async_client_chan_request(&net_dma.client);
4721 +static int __init netdev_dma_register(void) { return -ENODEV; }
4722 +#endif /* CONFIG_NET_DMA */
4725 + * netdev_compute_features - compute conjunction of two feature sets
4726 + * @all: first feature set
4727 + * @one: second feature set
4729 + * Computes a new feature set after adding a device with feature set
4730 + * @one to the master device with current feature set @all. Returns
4731 + * the new feature set.
4733 +int netdev_compute_features(unsigned long all, unsigned long one)
4735 + /* if device needs checksumming, downgrade to hw checksumming */
4736 + if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
4737 + all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
4739 + /* if device can't do all checksum, downgrade to ipv4/ipv6 */
4740 + if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
4741 + all ^= NETIF_F_HW_CSUM
4742 + | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4744 + if (one & NETIF_F_GSO)
4745 + one |= NETIF_F_GSO_SOFTWARE;
4746 + one |= NETIF_F_GSO;
4748 + /* If even one device supports robust GSO, enable it for all. */
4749 + if (one & NETIF_F_GSO_ROBUST)
4750 + all |= NETIF_F_GSO_ROBUST;
4752 + all &= one | NETIF_F_LLTX;
4754 + if (!(all & NETIF_F_ALL_CSUM))
4755 + all &= ~NETIF_F_SG;
4756 + if (!(all & NETIF_F_SG))
4757 + all &= ~NETIF_F_GSO_MASK;
4761 +EXPORT_SYMBOL(netdev_compute_features);
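Note the XOR downgrades above: when @all advertises NETIF_F_NO_CSUM but @one does not, the XOR clears NO_CSUM and sets HW_CSUM in one operation, and the next test likewise trades HW_CSUM for the IPv4/IPv6 checksum bits. A master device would typically fold this helper over all of its slaves, bonding-style; a sketch, where master, slaves and nslaves are hypothetical variables:

/* Recompute a master's feature set from its slaves (fragment). */
unsigned long features = NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_GSO;
int i;

for (i = 0; i < nslaves; i++)
	features = netdev_compute_features(features, slaves[i]->features);
master->features = features;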
4763 +static struct hlist_head *netdev_create_hash(void)
4766 + struct hlist_head *hash;
4768 + hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
4770 + for (i = 0; i < NETDEV_HASHENTRIES; i++)
4771 + INIT_HLIST_HEAD(&hash[i]);
4776 +/* Initialize per network namespace state */
4777 +static int __net_init netdev_init(struct net *net)
4779 + INIT_LIST_HEAD(&net->dev_base_head);
4781 + net->dev_name_head = netdev_create_hash();
4782 + if (net->dev_name_head == NULL)
4785 + net->dev_index_head = netdev_create_hash();
4786 + if (net->dev_index_head == NULL)
4792 + kfree(net->dev_name_head);
4797 +char *netdev_drivername(struct net_device *dev, char *buffer, int len)
4799 + struct device_driver *driver;
4800 + struct device *parent;
4802 + if (len <= 0 || !buffer)
4806 + parent = dev->dev.parent;
4811 + driver = parent->driver;
4812 + if (driver && driver->name)
4813 + strlcpy(buffer, driver->name, len);
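Given a valid buffer, netdev_drivername() returns it NUL-terminated (empty when the device has no parent or bound driver), so it is safe to use directly in log messages. A sketch of a typical caller, in the style of the transmit-timeout watchdog:

char drivername[64];

printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
       dev->name, netdev_drivername(dev, drivername, 64));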
4817 +static void __net_exit netdev_exit(struct net *net)
4819 + kfree(net->dev_name_head);
4820 + kfree(net->dev_index_head);
4823 +static struct pernet_operations __net_initdata netdev_net_ops = {
4824 + .init = netdev_init,
4825 + .exit = netdev_exit,
4828 +static void __net_exit default_device_exit(struct net *net)
4830 + struct net_device *dev, *next;
4832 + * Push all migratable network devices back to the
4833 + * initial network namespace
4836 + for_each_netdev_safe(net, dev, next) {
4838 + char fb_name[IFNAMSIZ];
4840 + /* Ignore unmovable devices (e.g. loopback) */
4841 + if (dev->features & NETIF_F_NETNS_LOCAL)
4844 + /* Push remaining network devices to init_net */
4845 + snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
4846 + err = dev_change_net_namespace(dev, &init_net, fb_name);
4848 + printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
4849 + __func__, dev->name, err);
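Whether a device is migratable is declared by its driver: setting NETIF_F_NETNS_LOCAL in the setup routine pins the device to the namespace it was created in, which is how loopback opts out of the sweep above. A sketch (my_setup is a hypothetical setup callback):

static void my_setup(struct net_device *dev)
{
	/* never move this device between network namespaces */
	dev->features |= NETIF_F_NETNS_LOCAL;
}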
4856 +static struct pernet_operations __net_initdata default_device_ops = {
4857 + .exit = default_device_exit,
4861 + * Initialize the DEV module. At boot time this walks the device list and
4862 + * unhooks any devices that fail to initialise (normally hardware not
4863 + * present) and leaves us with a valid list of present and active devices.
4868 + * This is called single threaded during boot, so no need
4869 + * to take the rtnl semaphore.
4871 +static int __init net_dev_init(void)
4873 + int i, rc = -ENOMEM;
4875 + BUG_ON(!dev_boot_phase);
4877 + if (dev_proc_init())
4880 + if (netdev_kobject_init())
4883 + INIT_LIST_HEAD(&ptype_all);
4884 + for (i = 0; i < PTYPE_HASH_SIZE; i++)
4885 + INIT_LIST_HEAD(&ptype_base[i]);
4887 + if (register_pernet_subsys(&netdev_net_ops))
4890 + if (register_pernet_device(&default_device_ops))
4894 + * Initialise the packet receive queues.
4897 + for_each_possible_cpu(i) {
4898 + struct softnet_data *queue;
4900 + queue = &per_cpu(softnet_data, i);
4901 + skb_queue_head_init(&queue->input_pkt_queue);
4902 + queue->completion_queue = NULL;
4903 + INIT_LIST_HEAD(&queue->poll_list);
4905 + queue->backlog.poll = process_backlog;
4906 + queue->backlog.weight = weight_p;
4909 + netdev_dma_register();
4911 + dev_boot_phase = 0;
4913 + open_softirq(NET_TX_SOFTIRQ, net_tx_action);
4914 + open_softirq(NET_RX_SOFTIRQ, net_rx_action);
4916 + hotcpu_notifier(dev_cpu_callback, 0);
4924 +subsys_initcall(net_dev_init);
4926 +EXPORT_SYMBOL(__dev_get_by_index);
4927 +EXPORT_SYMBOL(__dev_get_by_name);
4928 +EXPORT_SYMBOL(__dev_remove_pack);
4929 +EXPORT_SYMBOL(dev_valid_name);
4930 +EXPORT_SYMBOL(dev_add_pack);
4931 +EXPORT_SYMBOL(dev_alloc_name);
4932 +EXPORT_SYMBOL(dev_close);
4933 +EXPORT_SYMBOL(dev_get_by_flags);
4934 +EXPORT_SYMBOL(dev_get_by_index);
4935 +EXPORT_SYMBOL(dev_get_by_name);
4936 +EXPORT_SYMBOL(dev_open);
4937 +EXPORT_SYMBOL(dev_ioctl);
4938 +EXPORT_SYMBOL(dev_queue_xmit);
4939 +EXPORT_SYMBOL(dev_remove_pack);
4940 +EXPORT_SYMBOL(dev_set_allmulti);
4941 +EXPORT_SYMBOL(dev_set_promiscuity);
4942 +EXPORT_SYMBOL(dev_change_flags);
4943 +EXPORT_SYMBOL(dev_set_mtu);
4944 +EXPORT_SYMBOL(dev_set_mac_address);
4945 +EXPORT_SYMBOL(free_netdev);
4946 +EXPORT_SYMBOL(netdev_boot_setup_check);
4947 +EXPORT_SYMBOL(netdev_set_master);
4948 +EXPORT_SYMBOL(netdev_state_change);
4949 +EXPORT_SYMBOL(netif_receive_skb);
4950 +EXPORT_SYMBOL(__netif_receive_skb);
4951 +EXPORT_SYMBOL(netif_rx);
4952 +EXPORT_SYMBOL(register_gifconf);
4953 +EXPORT_SYMBOL(register_netdevice);
4954 +EXPORT_SYMBOL(register_netdevice_notifier);
4957 +EXPORT_SYMBOL(register_net_in);
4958 +EXPORT_SYMBOL(unregister_net_in);
4960 +EXPORT_SYMBOL(skb_checksum_help);
4961 +EXPORT_SYMBOL(synchronize_net);
4962 +EXPORT_SYMBOL(unregister_netdevice);
4963 +EXPORT_SYMBOL(unregister_netdevice_notifier);
4964 +EXPORT_SYMBOL(net_enable_timestamp);
4965 +EXPORT_SYMBOL(net_disable_timestamp);
4966 +EXPORT_SYMBOL(dev_get_flags);
4968 +#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
4969 +EXPORT_SYMBOL(br_handle_frame_hook);
4970 +EXPORT_SYMBOL(br_fdb_get_hook);
4971 +EXPORT_SYMBOL(br_fdb_put_hook);
4975 +EXPORT_SYMBOL(dev_load);
4978 +EXPORT_PER_CPU_SYMBOL(softnet_data);
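Most of the exports above exist for protocol modules. The classic consumers of dev_add_pack()/dev_remove_pack() are packet-type handlers hashed into the ptype_base table initialised in net_dev_init() above; a sketch of one (my_rcv and my_ptype are hypothetical, ETH_P_IP chosen only for illustration):

static int my_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *pt, struct net_device *orig_dev)
{
	kfree_skb(skb);		/* illustration only: consume and drop */
	return NET_RX_SUCCESS;
}

static struct packet_type my_ptype = {
	.type = __constant_htons(ETH_P_IP),
	.func = my_rcv,
};

/* a module would call dev_add_pack(&my_ptype) from its init path and
 * dev_remove_pack(&my_ptype) on exit */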