X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=net%2Fcore%2Fdev.c;h=9ac8c41ea1c68a3f1b857e436839d44440ae369d;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=94565597b689212901cffc86d74fa9d016d93e54;hpb=9213980e6a70d8473e0ffd4b39ab5b6caaba9ff5;p=linux-2.6.git diff --git a/net/core/dev.c b/net/core/dev.c index 94565597b..9ac8c41ea 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -74,7 +74,7 @@ #include #include -#include +#include #include #include #include @@ -107,10 +107,13 @@ #include #include #include +#include +#include #ifdef CONFIG_NET_RADIO #include /* Note : will define WIRELESS_EXT */ #include #endif /* CONFIG_NET_RADIO */ +#include #include /* This define, if set, will randomly drop a packet when congestion @@ -153,7 +156,7 @@ * 86DD IPv6 */ -static spinlock_t ptype_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(ptype_lock); static struct list_head ptype_base[16]; /* 16 way hashed list */ static struct list_head ptype_all; /* Taps */ @@ -182,8 +185,8 @@ static struct timer_list samp_timer = TIMER_INITIALIZER(sample_queue, 0, 0); * semaphore held. */ struct net_device *dev_base; -struct net_device **dev_tail = &dev_base; -rwlock_t dev_base_lock = RW_LOCK_UNLOCKED; +static struct net_device **dev_tail = &dev_base; +DEFINE_RWLOCK(dev_base_lock); EXPORT_SYMBOL(dev_base); EXPORT_SYMBOL(dev_base_lock); @@ -215,11 +218,6 @@ static struct notifier_block *netdev_chain; */ DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, }; -#ifdef CONFIG_NET_FASTROUTE -int netdev_fastroute; -int netdev_fastroute_obstacles; -#endif - #ifdef CONFIG_SYSFS extern int netdev_sysfs_init(void); extern int netdev_register_sysfs(struct net_device *); @@ -277,12 +275,6 @@ void dev_add_pack(struct packet_type *pt) int hash; spin_lock_bh(&ptype_lock); -#ifdef CONFIG_NET_FASTROUTE - if (pt->af_packet_priv) { - netdev_fastroute_obstacles++; - dev_clear_fastroute(pt->dev); - } -#endif if (pt->type == htons(ETH_P_ALL)) { netdev_nit++; list_add_rcu(&pt->list, &ptype_all); @@ -325,10 +317,6 @@ void __dev_remove_pack(struct packet_type *pt) list_for_each_entry(pt1, head, list) { if (pt == pt1) { -#ifdef CONFIG_NET_FASTROUTE - if (pt->af_packet_priv) - netdev_fastroute_obstacles--; -#endif list_del_rcu(&pt->list); goto out; } @@ -375,7 +363,7 @@ static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; * returns 0 on error and 1 on success. This is a generic routine to * all netdevices. */ -int netdev_boot_setup_add(char *name, struct ifmap *map) +static int netdev_boot_setup_add(char *name, struct ifmap *map) { struct netdev_boot_setup *s; int i; @@ -534,35 +522,6 @@ struct net_device *dev_get_by_name(const char *name) return dev; } -/* - Return value is changed to int to prevent illegal usage in future. - It is still legal to use to check for device existence. - - User should understand, that the result returned by this function - is meaningless, if it was not issued under rtnl semaphore. - */ - -/** - * dev_get - test if a device exists - * @name: name to test for - * - * Test if a name exists. Returns true if the name is found. In order - * to be sure the name is not allocated or removed during the test the - * caller must hold the rtnl semaphore. - * - * This function exists only for back compatibility with older - * drivers. 
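The ptype_all / ptype_base[16] split maintained by dev_add_pack() and __dev_remove_pack() above works like this: ETH_P_ALL taps sit on ptype_all, every other protocol is hashed by its ethertype. A minimal consumer might look like the sketch below; my_proto_rcv and the 0x88B5 ethertype are hypothetical and not part of this patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Receive handler; the three-argument signature matches what deliver_skb()
 * calls through pt->func(skb, skb->dev, pt). */
static int my_proto_rcv(struct sk_buff *skb, struct net_device *dev,
                        struct packet_type *pt)
{
        /* ... examine the frame ... */
        kfree_skb(skb);
        return 0;
}

static struct packet_type my_proto_type = {
        .type = __constant_htons(0x88B5),  /* hypothetical ethertype */
        .func = my_proto_rcv,
        .dev  = NULL,                      /* NULL: frames from any device */
};

/* dev_add_pack(&my_proto_type) hashes the entry into ptype_base[];
 * dev_remove_pack(&my_proto_type) takes it back out and waits for readers. */
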
- */ -int __dev_get(const char *name) -{ - struct net_device *dev; - - read_lock(&dev_base_lock); - dev = __dev_get_by_name(name); - read_unlock(&dev_base_lock); - return dev != NULL; -} - /** * __dev_get_by_index - find a device by its ifindex * @ifindex: index of device @@ -637,26 +596,17 @@ struct net_device *dev_getbyhwaddr(unsigned short type, char *ha) return dev; } -struct net_device *__dev_getfirstbyhwtype(unsigned short type) -{ - struct net_device *dev; - - for (dev = dev_base; dev; dev = dev->next) - if (dev->type == type) - break; - return dev; -} - -EXPORT_SYMBOL(__dev_getfirstbyhwtype); - struct net_device *dev_getfirstbyhwtype(unsigned short type) { struct net_device *dev; rtnl_lock(); - dev = __dev_getfirstbyhwtype(type); - if (dev) - dev_hold(dev); + for (dev = dev_base; dev; dev = dev->next) { + if (dev->type == type) { + dev_hold(dev); + break; + } + } rtnl_unlock(); return dev; } @@ -679,32 +629,14 @@ struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mas struct net_device *dev; read_lock(&dev_base_lock); - dev = __dev_get_by_flags(if_flags, mask); - if (dev) - dev_hold(dev); - read_unlock(&dev_base_lock); - return dev; -} - -/** - * __dev_get_by_flags - find any device with given flags - * @if_flags: IFF_* values - * @mask: bitmask of bits in if_flags to check - * - * Search for any interface with the given flags. Returns NULL if a device - * is not found or a pointer to the device. The caller must hold either - * the RTNL semaphore or @dev_base_lock. - */ - -struct net_device *__dev_get_by_flags(unsigned short if_flags, unsigned short mask) -{ - struct net_device *dev; - for (dev = dev_base; dev != NULL; dev = dev->next) { - if (((dev->flags ^ if_flags) & mask) == 0) - return dev; + if (((dev->flags ^ if_flags) & mask) == 0) { + dev_hold(dev); + break; + } } - return NULL; + read_unlock(&dev_base_lock); + return dev; } /** @@ -714,7 +646,7 @@ struct net_device *__dev_get_by_flags(unsigned short if_flags, unsigned short ma * Network device names need to be valid file names to * to allow sysfs to work */ -int dev_valid_name(const char *name) +static int dev_valid_name(const char *name) { return !(*name == '\0' || !strcmp(name, ".") @@ -875,18 +807,6 @@ static int default_rebuild_header(struct sk_buff *skb) } -/* - * Some old buggy device drivers change get_stats after registering - * the device. Try and trap them here. - * This can be elimnated when all devices are known fixed. - */ -static inline int get_stats_changed(struct net_device *dev) -{ - int changed = dev->last_stats != dev->get_stats; - dev->last_stats = dev->get_stats; - return changed; -} - /** * dev_open - prepare an interface for use. * @dev: device to open @@ -910,14 +830,6 @@ int dev_open(struct net_device *dev) if (dev->flags & IFF_UP) return 0; - /* - * Check for broken device drivers. - */ - if (get_stats_changed(dev) && net_ratelimit()) { - printk(KERN_ERR "%s: driver changed get_stats after register\n", - dev->name); - } - /* * Is it even present? */ @@ -934,14 +846,6 @@ int dev_open(struct net_device *dev) clear_bit(__LINK_STATE_START, &dev->state); } - /* - * Check for more broken device drivers. 
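After this change, every lookup helper that hands a device back to code running outside the rtnl or dev_base_lock critical section (dev_get_by_name(), dev_get_by_index(), dev_get_by_flags(), dev_getfirstbyhwtype()) returns it with a reference already taken via dev_hold(), so the caller must balance it with dev_put(). A usage sketch, with "eth0" purely as an example name:

#include <linux/kernel.h>
#include <linux/netdevice.h>

static void my_show_dev(void)
{
        struct net_device *dev = dev_get_by_name("eth0");

        if (dev) {
                printk(KERN_INFO "%s: ifindex %d\n", dev->name, dev->ifindex);
                dev_put(dev);           /* balance the dev_hold() done for us */
        }
}
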
- */ - if (get_stats_changed(dev) && net_ratelimit()) { - printk(KERN_ERR "%s: driver changed get_stats in open\n", - dev->name); - } - /* * If it went open OK then: */ @@ -970,39 +874,6 @@ int dev_open(struct net_device *dev) return ret; } -#ifdef CONFIG_NET_FASTROUTE - -static void dev_do_clear_fastroute(struct net_device *dev) -{ - if (dev->accept_fastpath) { - int i; - - for (i = 0; i <= NETDEV_FASTROUTE_HMASK; i++) { - struct dst_entry *dst; - - write_lock_irq(&dev->fastpath_lock); - dst = dev->fastpath[i]; - dev->fastpath[i] = NULL; - write_unlock_irq(&dev->fastpath_lock); - - dst_release(dst); - } - } -} - -void dev_clear_fastroute(struct net_device *dev) -{ - if (dev) { - dev_do_clear_fastroute(dev); - } else { - read_lock(&dev_base_lock); - for (dev = dev_base; dev; dev = dev->next) - dev_do_clear_fastroute(dev); - read_unlock(&dev_base_lock); - } -} -#endif - /** * dev_close - shutdown an interface. * @dev: device to shutdown @@ -1055,9 +926,6 @@ int dev_close(struct net_device *dev) */ dev->flags &= ~IFF_UP; -#ifdef CONFIG_NET_FASTROUTE - dev_clear_fastroute(dev); -#endif /* * Tell people we are down @@ -1135,6 +1003,29 @@ int call_netdevice_notifiers(unsigned long val, void *v) return notifier_call_chain(&netdev_chain, val, v); } +/* When > 0 there are consumers of rx skb time stamps */ +static atomic_t netstamp_needed = ATOMIC_INIT(0); + +void net_enable_timestamp(void) +{ + atomic_inc(&netstamp_needed); +} + +void net_disable_timestamp(void) +{ + atomic_dec(&netstamp_needed); +} + +static inline void net_timestamp(struct timeval *stamp) +{ + if (atomic_read(&netstamp_needed)) + do_gettimeofday(stamp); + else { + stamp->tv_sec = 0; + stamp->tv_usec = 0; + } +} + /* * Support routine. Sends outgoing frames to any network * taps currently in use. @@ -1184,40 +1075,34 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) * Invalidate hardware checksum when packet is to be mangled, and * complete checksum manually on outgoing path. 
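net_timestamp() above only calls do_gettimeofday() while netstamp_needed is non-zero, so any consumer of receive timestamps (a packet tap, for example) is expected to bracket its lifetime with the two new helpers. A sketch under that assumption; the my_tap_* names are hypothetical:

#include <linux/netdevice.h>

static int my_tap_start(void)
{
        net_enable_timestamp();   /* netstamp_needed++: rx skbs get real stamps */
        return 0;
}

static void my_tap_stop(void)
{
        net_disable_timestamp();  /* back to zeroed stamps once nobody cares */
}
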
*/ -int skb_checksum_help(struct sk_buff **pskb, int inward) +int skb_checksum_help(struct sk_buff *skb, int inward) { unsigned int csum; - int ret = 0, offset = (*pskb)->h.raw - (*pskb)->data; + int ret = 0, offset = skb->h.raw - skb->data; if (inward) { - (*pskb)->ip_summed = CHECKSUM_NONE; + skb->ip_summed = CHECKSUM_NONE; goto out; } - if (skb_shared(*pskb) || skb_cloned(*pskb)) { - struct sk_buff *newskb = skb_copy(*pskb, GFP_ATOMIC); - if (!newskb) { - ret = -ENOMEM; + if (skb_cloned(skb)) { + ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (ret) goto out; - } - if ((*pskb)->sk) - skb_set_owner_w(newskb, (*pskb)->sk); - kfree_skb(*pskb); - *pskb = newskb; } - if (offset > (int)(*pskb)->len) + if (offset > (int)skb->len) BUG(); - csum = skb_checksum(*pskb, offset, (*pskb)->len-offset, 0); + csum = skb_checksum(skb, offset, skb->len-offset, 0); - offset = (*pskb)->tail - (*pskb)->h.raw; + offset = skb->tail - skb->h.raw; if (offset <= 0) BUG(); - if ((*pskb)->csum + 2 > offset) + if (skb->csum + 2 > offset) BUG(); - *(u16*)((*pskb)->h.raw + (*pskb)->csum) = csum_fold(csum); - (*pskb)->ip_summed = CHECKSUM_NONE; + *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); + skb->ip_summed = CHECKSUM_NONE; out: return ret; } @@ -1236,7 +1121,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) return 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) - if (skb_shinfo(skb)->frags[i].page >= highmem_start_page) + if (PageHighMem(skb_shinfo(skb)->frags[i].page)) return 1; return 0; @@ -1305,6 +1190,20 @@ int __skb_linearize(struct sk_buff *skb, int gfp_mask) return 0; } +#define HARD_TX_LOCK(dev, cpu) { \ + if ((dev->features & NETIF_F_LLTX) == 0) { \ + spin_lock(&dev->xmit_lock); \ + dev->xmit_lock_owner = cpu; \ + } \ +} + +#define HARD_TX_UNLOCK(dev) { \ + if ((dev->features & NETIF_F_LLTX) == 0) { \ + dev->xmit_lock_owner = -1; \ + spin_unlock(&dev->xmit_lock); \ + } \ +} + /** * dev_queue_xmit - transmit a buffer * @skb: buffer to transmit @@ -1345,18 +1244,39 @@ int dev_queue_xmit(struct sk_buff *skb) (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) && (!(dev->features & NETIF_F_IP_CSUM) || skb->protocol != htons(ETH_P_IP)))) - if (skb_checksum_help(&skb, 0)) + if (skb_checksum_help(skb, 0)) goto out_kfree_skb; - /* Grab device queue */ - spin_lock_bh(&dev->queue_lock); - q = dev->qdisc; + /* Disable soft irqs for various locks below. Also + * stops preemption for RCU. + */ + local_bh_disable(); + + /* Updates of qdisc are serialized by queue_lock. + * The struct Qdisc which is pointed to by qdisc is now a + * rcu structure - it may be accessed without acquiring + * a lock (but the structure may be stale.) The freeing of the + * qdisc will be deferred until it's known that there are no + * more references to it. + * + * If the qdisc has an enqueue function, we still need to + * hold the queue_lock before calling it, since queue_lock + * also serializes access to the device queue. + */ + + q = rcu_dereference(dev->qdisc); +#ifdef CONFIG_NET_CLS_ACT + skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); +#endif if (q->enqueue) { + /* Grab device queue */ + spin_lock(&dev->queue_lock); + rc = q->enqueue(skb, q); qdisc_run(dev); - spin_unlock_bh(&dev->queue_lock); + spin_unlock(&dev->queue_lock); rc = rc == NET_XMIT_BYPASS ? 
NET_XMIT_SUCCESS : rc; goto out; } @@ -1374,18 +1294,11 @@ int dev_queue_xmit(struct sk_buff *skb) Either shot noqueue qdisc, it is even simpler 8) */ if (dev->flags & IFF_UP) { - int cpu = smp_processor_id(); + int cpu = smp_processor_id(); /* ok because BHs are off */ if (dev->xmit_lock_owner != cpu) { - /* - * The spin_lock effectivly does a preempt lock, but - * we are about to drop that... - */ - preempt_disable(); - spin_unlock(&dev->queue_lock); - spin_lock(&dev->xmit_lock); - dev->xmit_lock_owner = cpu; - preempt_enable(); + + HARD_TX_LOCK(dev, cpu); if (!netif_queue_stopped(dev)) { if (netdev_nit) @@ -1393,17 +1306,14 @@ int dev_queue_xmit(struct sk_buff *skb) rc = 0; if (!dev->hard_start_xmit(skb, dev)) { - dev->xmit_lock_owner = -1; - spin_unlock_bh(&dev->xmit_lock); + HARD_TX_UNLOCK(dev); goto out; } } - dev->xmit_lock_owner = -1; - spin_unlock_bh(&dev->xmit_lock); + HARD_TX_UNLOCK(dev); if (net_ratelimit()) printk(KERN_CRIT "Virtual device %s asks to " "queue packet!\n", dev->name); - goto out_enetdown; } else { /* Recursion is detected! It is possible, * unfortunately */ @@ -1412,12 +1322,15 @@ int dev_queue_xmit(struct sk_buff *skb) "%s, fix it urgently!\n", dev->name); } } - spin_unlock_bh(&dev->queue_lock); -out_enetdown: + rc = -ENETDOWN; + local_bh_enable(); + out_kfree_skb: kfree_skb(skb); + return rc; out: + local_bh_enable(); return rc; } @@ -1440,66 +1353,6 @@ int mod_cong = 290; DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; -#ifdef CONFIG_NET_HW_FLOWCONTROL -atomic_t netdev_dropping = ATOMIC_INIT(0); -static unsigned long netdev_fc_mask = 1; -unsigned long netdev_fc_xoff; -spinlock_t netdev_fc_lock = SPIN_LOCK_UNLOCKED; - -static struct -{ - void (*stimul)(struct net_device *); - struct net_device *dev; -} netdev_fc_slots[BITS_PER_LONG]; - -int netdev_register_fc(struct net_device *dev, - void (*stimul)(struct net_device *dev)) -{ - int bit = 0; - unsigned long flags; - - spin_lock_irqsave(&netdev_fc_lock, flags); - if (netdev_fc_mask != ~0UL) { - bit = ffz(netdev_fc_mask); - netdev_fc_slots[bit].stimul = stimul; - netdev_fc_slots[bit].dev = dev; - set_bit(bit, &netdev_fc_mask); - clear_bit(bit, &netdev_fc_xoff); - } - spin_unlock_irqrestore(&netdev_fc_lock, flags); - return bit; -} - -void netdev_unregister_fc(int bit) -{ - unsigned long flags; - - spin_lock_irqsave(&netdev_fc_lock, flags); - if (bit > 0) { - netdev_fc_slots[bit].stimul = NULL; - netdev_fc_slots[bit].dev = NULL; - clear_bit(bit, &netdev_fc_mask); - clear_bit(bit, &netdev_fc_xoff); - } - spin_unlock_irqrestore(&netdev_fc_lock, flags); -} - -static void netdev_wakeup(void) -{ - unsigned long xoff; - - spin_lock(&netdev_fc_lock); - xoff = netdev_fc_xoff; - netdev_fc_xoff = 0; - while (xoff) { - int i = ffz(~xoff); - xoff &= ~(1 << i); - netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev); - } - spin_unlock(&netdev_fc_lock); -} -#endif - static void get_sample_stats(int cpu) { #ifdef RAND_LIE @@ -1575,7 +1428,7 @@ int netif_rx(struct sk_buff *skb) struct softnet_data *queue; unsigned long flags; -#ifdef CONFIG_NETPOLL_RX +#ifdef CONFIG_NETPOLL if (skb->dev->netpoll_rx && netpoll_rx(skb)) { kfree_skb(skb); return NET_RX_DROP; @@ -1609,13 +1462,8 @@ enqueue: return queue->cng_level; } - if (queue->throttle) { + if (queue->throttle) queue->throttle = 0; -#ifdef CONFIG_NET_HW_FLOWCONTROL - if (atomic_dec_and_test(&netdev_dropping)) - netdev_wakeup(); -#endif - } netif_rx_schedule(&queue->backlog_dev); goto enqueue; @@ -1624,9 +1472,6 @@ enqueue: if (!queue->throttle) { queue->throttle = 1; 
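The HARD_TX_LOCK/HARD_TX_UNLOCK pair used in dev_queue_xmit() above skips dev->xmit_lock entirely when the driver advertises NETIF_F_LLTX, so such a driver has to serialize its own transmit path. A driver-side sketch of that contract; my_priv and my_hard_start_xmit are hypothetical, and a real lock-less driver may prefer trylock semantics:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct my_priv {
        spinlock_t tx_lock;             /* stands in for dev->xmit_lock */
};

static int my_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct my_priv *priv = netdev_priv(dev);

        /* dev_queue_xmit() reaches us with BHs off but no xmit_lock held. */
        spin_lock(&priv->tx_lock);
        /* ... place skb on the hardware ring ... */
        spin_unlock(&priv->tx_lock);

        dev_kfree_skb(skb);             /* placeholder; a real driver frees on tx completion */
        return 0;                       /* 0: skb consumed */
}

/* At setup time such a driver would do:
 *      dev->features        |= NETIF_F_LLTX;
 *      dev->hard_start_xmit  = my_hard_start_xmit;
 */
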
__get_cpu_var(netdev_rx_stat).throttled++; -#ifdef CONFIG_NET_HW_FLOWCONTROL - atomic_inc(&netdev_dropping); -#endif } drop: @@ -1637,6 +1482,21 @@ drop: return NET_RX_DROP; } +int netif_rx_ni(struct sk_buff *skb) +{ + int err; + + preempt_disable(); + err = netif_rx(skb); + if (local_softirq_pending()) + do_softirq(); + preempt_enable(); + + return err; +} + +EXPORT_SYMBOL(netif_rx_ni); + static __inline__ void skb_bond(struct sk_buff *skb) { struct net_device *dev = skb->dev; @@ -1694,42 +1554,75 @@ static void net_tx_action(struct softirq_action *h) } static __inline__ int deliver_skb(struct sk_buff *skb, - struct packet_type *pt_prev, int last) + struct packet_type *pt_prev) { atomic_inc(&skb->users); return pt_prev->func(skb, skb->dev, pt_prev); } - #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE) -int (*br_handle_frame_hook)(struct sk_buff *skb); +int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb); -static __inline__ int handle_bridge(struct sk_buff *skb, - struct packet_type *pt_prev) +static __inline__ int handle_bridge(struct sk_buff **pskb, + struct packet_type **pt_prev, int *ret) { - int ret = NET_RX_DROP; - if (pt_prev) - ret = deliver_skb(skb, pt_prev, 0); + struct net_bridge_port *port; - return ret; -} + if ((*pskb)->pkt_type == PACKET_LOOPBACK || + (port = rcu_dereference((*pskb)->dev->br_port)) == NULL) + return 0; + if (*pt_prev) { + *ret = deliver_skb(*pskb, *pt_prev); + *pt_prev = NULL; + } + + return br_handle_frame_hook(port, pskb); +} +#else +#define handle_bridge(skb, pt_prev, ret) (0) #endif -static inline int __handle_bridge(struct sk_buff *skb, - struct packet_type **pt_prev, int *ret) +#ifdef CONFIG_NET_CLS_ACT +/* TODO: Maybe we should just force sch_ingress to be compiled in + * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions + * a compare and 2 stores extra right now if we dont have it on + * but have CONFIG_NET_CLS_ACT + * NOTE: This doesnt stop any functionality; if you dont have + * the ingress scheduler, you just cant add policies on ingress. 
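netif_rx() remains the interrupt-context entry into the per-CPU backlog, while the new netif_rx_ni() is for process-context callers: it disables preemption around netif_rx() and runs any softirq it raised. A receive-path sketch assuming a hypothetical driver that gets its frames in process context:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int my_deliver_frame(struct net_device *dev, const void *data,
                            unsigned int len)
{
        struct sk_buff *skb = dev_alloc_skb(len + 2);

        if (!skb)
                return NET_RX_DROP;

        skb_reserve(skb, 2);                     /* align the IP header */
        memcpy(skb_put(skb, len), data, len);
        skb->dev = dev;
        skb->protocol = eth_type_trans(skb, dev);

        /* From an ISR this would simply be netif_rx(skb). */
        return netif_rx_ni(skb);
}
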
+ * + */ +static int ing_filter(struct sk_buff *skb) { -#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) - if (skb->dev->br_port && skb->pkt_type != PACKET_LOOPBACK) { - *ret = handle_bridge(skb, *pt_prev); - if (br_handle_frame_hook(skb) == 0) - return 1; + struct Qdisc *q; + struct net_device *dev = skb->dev; + int result = TC_ACT_OK; + + if (dev->qdisc_ingress) { + __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd); + if (MAX_RED_LOOP < ttl++) { + printk("Redir loop detected Dropping packet (%s->%s)\n", + skb->input_dev?skb->input_dev->name:"??",skb->dev->name); + return TC_ACT_SHOT; + } + + skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl); + + skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS); + if (NULL == skb->input_dev) { + skb->input_dev = skb->dev; + printk("ing_filter: fixed %s out %s\n",skb->input_dev->name,skb->dev->name); + } + spin_lock(&dev->ingress_lock); + if ((q = dev->qdisc_ingress) != NULL) + result = q->enqueue(skb, q); + spin_unlock(&dev->ingress_lock); - *pt_prev = NULL; } -#endif - return 0; + + return result; } +#endif int netif_receive_skb(struct sk_buff *skb) { @@ -1737,7 +1630,7 @@ int netif_receive_skb(struct sk_buff *skb) int ret = NET_RX_DROP; unsigned short type; -#ifdef CONFIG_NETPOLL_RX +#ifdef CONFIG_NETPOLL if (skb->dev->netpoll_rx && skb->dev->poll && netpoll_rx(skb)) { kfree_skb(skb); return NET_RX_DROP; @@ -1751,29 +1644,50 @@ int netif_receive_skb(struct sk_buff *skb) __get_cpu_var(netdev_rx_stat).total++; -#ifdef CONFIG_NET_FASTROUTE - if (skb->pkt_type == PACKET_FASTROUTE) { - __get_cpu_var(netdev_rx_stat).fastroute_deferred_out++; - return dev_queue_xmit(skb); - } -#endif - skb->h.raw = skb->nh.raw = skb->data; skb->mac_len = skb->nh.raw - skb->mac.raw; pt_prev = NULL; + rcu_read_lock(); + +#ifdef CONFIG_NET_CLS_ACT + if (skb->tc_verd & TC_NCLS) { + skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); + goto ncls; + } +#endif + list_for_each_entry_rcu(ptype, &ptype_all, list) { if (!ptype->dev || ptype->dev == skb->dev) { if (pt_prev) - ret = deliver_skb(skb, pt_prev, 0); + ret = deliver_skb(skb, pt_prev); pt_prev = ptype; } } +#ifdef CONFIG_NET_CLS_ACT + if (pt_prev) { + ret = deliver_skb(skb, pt_prev); + pt_prev = NULL; /* noone else should process this after*/ + } else { + skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); + } + + ret = ing_filter(skb); + + if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) { + kfree_skb(skb); + goto out; + } + + skb->tc_verd = 0; +ncls: +#endif + handle_diverter(skb); - if (__handle_bridge(skb, &pt_prev, &ret)) + if (handle_bridge(&skb, &pt_prev, &ret)) goto out; type = skb->protocol; @@ -1781,7 +1695,7 @@ int netif_receive_skb(struct sk_buff *skb) if (ptype->type == type && (!ptype->dev || ptype->dev == skb->dev)) { if (pt_prev) - ret = deliver_skb(skb, pt_prev, 0); + ret = deliver_skb(skb, pt_prev); pt_prev = ptype; } } @@ -1829,16 +1743,6 @@ static int process_backlog(struct net_device *backlog_dev, int *budget) if (work >= quota || jiffies - start_time > 1) break; -#ifdef CONFIG_NET_HW_FLOWCONTROL - if (queue->throttle && - queue->input_pkt_queue.qlen < no_cong_thresh ) { - queue->throttle = 0; - if (atomic_dec_and_test(&netdev_dropping)) { - netdev_wakeup(); - break; - } - } -#endif } backlog_dev->quota -= work; @@ -1853,13 +1757,8 @@ job_done: smp_mb__before_clear_bit(); netif_poll_enable(backlog_dev); - if (queue->throttle) { + if (queue->throttle) queue->throttle = 0; -#ifdef CONFIG_NET_HW_FLOWCONTROL - if (atomic_dec_and_test(&netdev_dropping)) - netdev_wakeup(); -#endif - } local_irq_enable(); return 0; } @@ -1996,7 
+1895,8 @@ static int dev_ifconf(char __user *arg) total = 0; for (dev = dev_base; dev; dev = dev->next) { - if (!dev_in_nx_info(dev, current->nx_info)) + if (vx_flags(VXF_HIDE_NETIF, 0) && + !dev_in_nx_info(dev, current->nx_info)) continue; for (i = 0; i < NPROTO; i++) { if (gifconf_list[i]) { @@ -2060,7 +1960,7 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) { struct nx_info *nxi = current->nx_info; - if (!dev_in_nx_info(dev, nxi)) + if (vx_flags(VXF_HIDE_NETIF, 0) && !dev_in_nx_info(dev, nxi)) return; if (dev->get_stats) { struct net_device_stats *stats = dev->get_stats(dev); @@ -2275,13 +2175,6 @@ void dev_set_promiscuity(struct net_device *dev, int inc) if ((dev->promiscuity += inc) == 0) dev->flags &= ~IFF_PROMISC; if (dev->flags ^ old_flags) { -#ifdef CONFIG_NET_FASTROUTE - if (dev->flags & IFF_PROMISC) { - netdev_fastroute_obstacles++; - dev_clear_fastroute(dev); - } else - netdev_fastroute_obstacles--; -#endif dev_mc_upload(dev); printk(KERN_INFO "device %s %s promiscuous mode\n", dev->name, (dev->flags & IFF_PROMISC) ? "entered" : @@ -2452,8 +2345,11 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) return dev_set_mtu(dev, ifr->ifr_mtu); case SIOCGIFHWADDR: - memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, - min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); + if (!dev->addr_len) + memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); + else + memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, + min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); ifr->ifr_hwaddr.sa_family = dev->type; return 0; @@ -2754,7 +2650,7 @@ int dev_ioctl(unsigned int cmd, void __user *arg) /* Follow me in net/core/wireless.c */ ret = wireless_process_ioctl(&ifr, cmd); rtnl_unlock(); - if (!ret && IW_IS_GET(cmd) && + if (IW_IS_GET(cmd) && copy_to_user(arg, &ifr, sizeof(struct ifreq))) ret = -EFAULT; @@ -2773,7 +2669,7 @@ int dev_ioctl(unsigned int cmd, void __user *arg) * number. The caller must hold the rtnl semaphore or the * dev_base_lock to be sure it remains unique. */ -int dev_new_index(void) +static int dev_new_index(void) { static int ifindex; for (;;) { @@ -2787,7 +2683,7 @@ int dev_new_index(void) static int dev_boot_phase = 1; /* Delayed registration/unregisteration */ -static spinlock_t net_todo_list_lock = SPIN_LOCK_UNLOCKED; +static DEFINE_SPINLOCK(net_todo_list_lock); static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list); static inline void net_set_todo(struct net_device *dev) @@ -2806,8 +2702,7 @@ static inline void net_set_todo(struct net_device *dev) * chain. 0 is returned on success. A negative errno code is returned * on a failure to set up the device, or if the name is a duplicate. * - * Callers must hold the rtnl semaphore. See the comment at the - * end of Space.c for details about the locking. You may want + * Callers must hold the rtnl semaphore. You may want * register_netdev() instead of this. * * BUGS: @@ -2830,8 +2725,8 @@ int register_netdevice(struct net_device *dev) spin_lock_init(&dev->queue_lock); spin_lock_init(&dev->xmit_lock); dev->xmit_lock_owner = -1; -#ifdef CONFIG_NET_FASTROUTE - dev->fastpath_lock = RW_LOCK_UNLOCKED; +#ifdef CONFIG_NET_CLS_ACT + spin_lock_init(&dev->ingress_lock); #endif ret = alloc_divert_blk(dev); @@ -2880,6 +2775,14 @@ int register_netdevice(struct net_device *dev) dev->features &= ~NETIF_F_SG; } + /* TSO requires that SG is present as well. 
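register_netdevice() already strips NETIF_F_SG when no checksum offload is advertised; the hunk above extends that to strip NETIF_F_TSO when SG is missing, since TSO depends on scatter/gather. A driver that wants TSO to survive registration therefore advertises all three, roughly as in this hypothetical setup routine:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void my_setup(struct net_device *dev)
{
        ether_setup(dev);

        /* TSO needs SG, and SG needs a checksum feature, or register_netdevice()
         * will clear them again and log a warning. */
        dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO;
}
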
*/ + if ((dev->features & NETIF_F_TSO) && + !(dev->features & NETIF_F_SG)) { + printk("%s: Dropping NETIF_F_TSO since no SG feature.\n", + dev->name); + dev->features &= ~NETIF_F_TSO; + } + /* * nil rebuild_header routine, * that should be never called and used as just bug trap. @@ -2920,6 +2823,51 @@ out_err: goto out; } +/** + * register_netdev - register a network device + * @dev: device to register + * + * Take a completed network device structure and add it to the kernel + * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier + * chain. 0 is returned on success. A negative errno code is returned + * on a failure to set up the device, or if the name is a duplicate. + * + * This is a wrapper around register_netdev that takes the rtnl semaphore + * and expands the device name if you passed a format string to + * alloc_netdev. + */ +int register_netdev(struct net_device *dev) +{ + int err; + + rtnl_lock(); + + /* + * If the name is a format string the caller wants us to do a + * name allocation. + */ + if (strchr(dev->name, '%')) { + err = dev_alloc_name(dev, dev->name); + if (err < 0) + goto out; + } + + /* + * Back compatibility hook. Kill this one in 2.5 + */ + if (dev->name[0] == 0 || dev->name[0] == ' ') { + err = dev_alloc_name(dev, "eth%d"); + if (err < 0) + goto out; + } + + err = register_netdevice(dev); +out: + rtnl_unlock(); + return err; +} +EXPORT_SYMBOL(register_netdev); + /* * netdev_wait_allrefs - wait until all references are gone. * @@ -2939,7 +2887,6 @@ static void netdev_wait_allrefs(struct net_device *dev) while (atomic_read(&dev->refcnt) != 0) { if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { rtnl_shlock(); - rtnl_exlock(); /* Rebroadcast unregister notification */ notifier_call_chain(&netdev_chain, @@ -2956,14 +2903,12 @@ static void netdev_wait_allrefs(struct net_device *dev) linkwatch_run_queue(); } - rtnl_exunlock(); rtnl_shunlock(); rebroadcast_time = jiffies; } - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(HZ / 4); + msleep(250); if (time_after(jiffies, warning_time + 10 * HZ)) { printk(KERN_EMERG "unregister_netdevice: " @@ -3064,6 +3009,46 @@ out: up(&net_todo_run_mutex); } +/** + * alloc_netdev - allocate network device + * @sizeof_priv: size of private data to allocate space for + * @name: device name format string + * @setup: callback to initialize device + * + * Allocates a struct net_device with private data area for driver use + * and performs basic initialization. + */ +struct net_device *alloc_netdev(int sizeof_priv, const char *name, + void (*setup)(struct net_device *)) +{ + void *p; + struct net_device *dev; + int alloc_size; + + /* ensure 32-byte alignment of both the device and private area */ + alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST; + alloc_size += sizeof_priv + NETDEV_ALIGN_CONST; + + p = kmalloc(alloc_size, GFP_KERNEL); + if (!p) { + printk(KERN_ERR "alloc_dev: Unable to allocate device.\n"); + return NULL; + } + memset(p, 0, alloc_size); + + dev = (struct net_device *) + (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); + dev->padded = (char *)dev - (char *)p; + + if (sizeof_priv) + dev->priv = netdev_priv(dev); + + setup(dev); + strcpy(dev->name, name); + return dev; +} +EXPORT_SYMBOL(alloc_netdev); + /** * free_netdev - free network device * @dev: device @@ -3106,8 +3091,7 @@ void synchronize_net(void) * from the kernel tables. On success 0 is returned, on a failure * a negative errno code is returned. * - * Callers must hold the rtnl semaphore. 
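register_netdev() and alloc_netdev() above give drivers the standard allocate/register pairing without open-coding the rtnl locking or the "%d" name expansion. A module-lifecycle sketch, assuming a hypothetical Ethernet-style device; the my_* names and "myeth%d" are illustrative, and a real driver would also fill in its hard_start_xmit and friends before registering:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static struct net_device *my_dev;

static int __init my_init(void)
{
        int err;

        /* 0: no private area; "%d" asks register_netdev() to pick a unit. */
        my_dev = alloc_netdev(0, "myeth%d", ether_setup);
        if (!my_dev)
                return -ENOMEM;

        err = register_netdev(my_dev);  /* takes rtnl, expands the name */
        if (err)
                free_netdev(my_dev);
        return err;
}

static void __exit my_exit(void)
{
        unregister_netdev(my_dev);      /* takes rtnl, sends NETDEV_UNREGISTER */
        free_netdev(my_dev);            /* release the alloc_netdev() memory */
}

module_init(my_init);
module_exit(my_exit);
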
See the comment at the - * end of Space.c for details about the locking. You may want + * Callers must hold the rtnl semaphore. You may want * unregister_netdev() instead of this. */ @@ -3154,10 +3138,6 @@ int unregister_netdevice(struct net_device *dev) synchronize_net(); -#ifdef CONFIG_NET_FASTROUTE - dev_clear_fastroute(dev); -#endif - /* Shutdown queueing discipline. */ dev_shutdown(dev); @@ -3183,10 +3163,33 @@ int unregister_netdevice(struct net_device *dev) /* Finish processing unregister after unlock */ net_set_todo(dev); + synchronize_net(); + dev_put(dev); return 0; } +/** + * unregister_netdev - remove device from the kernel + * @dev: device + * + * This function shuts down a device interface and removes it + * from the kernel tables. On success 0 is returned, on a failure + * a negative errno code is returned. + * + * This is just a wrapper for unregister_netdevice that takes + * the rtnl semaphore. In general you want to use this and not + * unregister_netdevice. + */ +void unregister_netdev(struct net_device *dev) +{ + rtnl_lock(); + unregister_netdevice(dev); + rtnl_unlock(); +} + +EXPORT_SYMBOL(unregister_netdev); + #ifdef CONFIG_HOTPLUG_CPU static int dev_cpu_callback(struct notifier_block *nfb, unsigned long action, @@ -3251,6 +3254,8 @@ static int __init net_dev_init(void) BUG_ON(!dev_boot_phase); + net_random_init(); + if (dev_proc_init()) goto out; @@ -3307,28 +3312,24 @@ out: subsys_initcall(net_dev_init); -EXPORT_SYMBOL(__dev_get); -EXPORT_SYMBOL(__dev_get_by_flags); EXPORT_SYMBOL(__dev_get_by_index); EXPORT_SYMBOL(__dev_get_by_name); EXPORT_SYMBOL(__dev_remove_pack); EXPORT_SYMBOL(__skb_linearize); -EXPORT_SYMBOL(call_netdevice_notifiers); EXPORT_SYMBOL(dev_add_pack); EXPORT_SYMBOL(dev_alloc_name); EXPORT_SYMBOL(dev_close); EXPORT_SYMBOL(dev_get_by_flags); EXPORT_SYMBOL(dev_get_by_index); EXPORT_SYMBOL(dev_get_by_name); -EXPORT_SYMBOL(dev_getbyhwaddr); EXPORT_SYMBOL(dev_ioctl); -EXPORT_SYMBOL(dev_new_index); EXPORT_SYMBOL(dev_open); EXPORT_SYMBOL(dev_queue_xmit); -EXPORT_SYMBOL(dev_queue_xmit_nit); EXPORT_SYMBOL(dev_remove_pack); EXPORT_SYMBOL(dev_set_allmulti); EXPORT_SYMBOL(dev_set_promiscuity); +EXPORT_SYMBOL(dev_change_flags); +EXPORT_SYMBOL(dev_set_mtu); EXPORT_SYMBOL(free_netdev); EXPORT_SYMBOL(netdev_boot_setup_check); EXPORT_SYMBOL(netdev_set_master); @@ -3342,26 +3343,15 @@ EXPORT_SYMBOL(skb_checksum_help); EXPORT_SYMBOL(synchronize_net); EXPORT_SYMBOL(unregister_netdevice); EXPORT_SYMBOL(unregister_netdevice_notifier); +EXPORT_SYMBOL(net_enable_timestamp); +EXPORT_SYMBOL(net_disable_timestamp); #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) EXPORT_SYMBOL(br_handle_frame_hook); #endif -/* for 801q VLAN support */ -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) -EXPORT_SYMBOL(dev_change_flags); -#endif + #ifdef CONFIG_KMOD EXPORT_SYMBOL(dev_load); #endif -#ifdef CONFIG_NET_HW_FLOWCONTROL -EXPORT_SYMBOL(netdev_dropping); -EXPORT_SYMBOL(netdev_fc_xoff); -EXPORT_SYMBOL(netdev_register_fc); -EXPORT_SYMBOL(netdev_unregister_fc); -#endif -#ifdef CONFIG_NET_FASTROUTE -EXPORT_SYMBOL(netdev_fastroute); -EXPORT_SYMBOL(netdev_fastroute_obstacles); -#endif EXPORT_PER_CPU_SYMBOL(softnet_data);
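
Events such as NETDEV_REGISTER, NETDEV_UP and the NETDEV_UNREGISTER rebroadcasts in netdev_wait_allrefs() all travel through netdev_chain, which external code subscribes to with the notifier API. A small subscriber sketch with hypothetical names:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int my_netdev_event(struct notifier_block *nb, unsigned long event,
                           void *ptr)
{
        struct net_device *dev = ptr;

        switch (event) {
        case NETDEV_UP:
                printk(KERN_INFO "my_watch: %s is up\n", dev->name);
                break;
        case NETDEV_UNREGISTER:
                printk(KERN_INFO "my_watch: %s is going away\n", dev->name);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block my_netdev_nb = {
        .notifier_call = my_netdev_event,
};

/* register_netdevice_notifier(&my_netdev_nb) hooks into netdev_chain and
 * replays NETDEV_REGISTER (plus NETDEV_UP for running interfaces) for devices
 * that already exist; unregister_netdevice_notifier() detaches it. */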