#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
dev->queue_lock serializes queue accesses for this device
AND dev->qdisc pointer itself.
- netif_tx_lock serializes accesses to device driver.
+ dev->xmit_lock serializes accesses to device driver.
- dev->queue_lock and netif_tx_lock are mutually exclusive,
+ dev->queue_lock and dev->xmit_lock are mutually exclusive,
if one is grabbed, another must be free.
*/
NOTE: Called under dev->queue_lock with locally disabled BH.
*/
/*
 * NOTE(review): patch residue — a unified-diff hunk whose @@ header and
 * several context lines are missing, so it can be neither applied nor
 * compiled as-is.  The '-' lines belong to the newer GSO/netif_tx_lock
 * qdisc_restart() (gso_skb requeue path, dev_hard_start_xmit wrapper);
 * the '+' lines restore the older scheme: raw dev->xmit_lock with
 * explicit xmit_lock_owner bookkeeping, direct dev->hard_start_xmit()
 * dispatch, netdev_nit tap delivery, and a non-static symbol (see the
 * matching EXPORT_SYMBOL hunk below).  Kept byte-identical pending
 * reconstruction of the original patch.
 */
-static inline int qdisc_restart(struct net_device *dev)
+int qdisc_restart(struct net_device *dev)
{
struct Qdisc *q = dev->qdisc;
struct sk_buff *skb;
/* Dequeue packet */
- if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
+ if ((skb = q->dequeue(q)) != NULL) {
unsigned nolock = (dev->features & NETIF_F_LLTX);
-
- dev->gso_skb = NULL;
-
/*
* When the driver has LLTX set it does its own locking
* in start_xmit. No need to add additional overhead by
* will be requeued.
*/
if (!nolock) {
- if (!netif_tx_trylock(dev)) {
+ if (!spin_trylock(&dev->xmit_lock)) {
collision:
/* So, someone grabbed the driver. */
__get_cpu_var(netdev_rx_stat).cpu_collision++;
goto requeue;
}
+ /* Remember that the driver is grabbed by us. */
+ dev->xmit_lock_owner = smp_processor_id();
}
{
if (!netif_queue_stopped(dev)) {
int ret;
+ if (netdev_nit)
+ dev_queue_xmit_nit(skb, dev);
- ret = dev_hard_start_xmit(skb, dev);
+ ret = dev->hard_start_xmit(skb, dev);
if (ret == NETDEV_TX_OK) {
if (!nolock) {
- netif_tx_unlock(dev);
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->xmit_lock);
}
spin_lock(&dev->queue_lock);
return -1;
/* NETDEV_TX_BUSY - we need to requeue */
/* Release the driver */
if (!nolock) {
- netif_tx_unlock(dev);
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->xmit_lock);
}
spin_lock(&dev->queue_lock);
q = dev->qdisc;
*/
requeue:
- if (skb->next)
- dev->gso_skb = skb;
- else
- q->ops->requeue(skb, q);
+ q->ops->requeue(skb, q);
netif_schedule(dev);
return 1;
}
return q->q.qlen;
}
/*
 * NOTE(review): pure-deletion hunk — __qdisc_run() (the
 * __LINK_STATE_QDISC_RUNNING driver loop around qdisc_restart) is
 * removed entirely by this patch; presumably its callers revert to
 * invoking the re-exported qdisc_restart() directly — TODO confirm
 * against the full patch.
 */
-void __qdisc_run(struct net_device *dev)
-{
- if (unlikely(dev->qdisc == &noop_qdisc))
- goto out;
-
- while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
- /* NOTHING */;
-
-out:
- clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
-}
-
/*
 * NOTE(review): diff hunk for the TX-watchdog timer callback.  Two
 * independent changes are mixed here: (1) the locking API swap
 * (netif_tx_lock -> bare spin_lock on dev->xmit_lock), and (2) the
 * timeout test rewritten from the overflow-safe time_after() macro to
 * open-coded jiffies subtraction, plus printk reflowed onto one line.
 * Context lines between the braces may be missing; kept byte-identical.
 */
static void dev_watchdog(unsigned long arg)
{
struct net_device *dev = (struct net_device *)arg;
- netif_tx_lock(dev);
+ spin_lock(&dev->xmit_lock);
if (dev->qdisc != &noop_qdisc) {
if (netif_device_present(dev) &&
netif_running(dev) &&
netif_carrier_ok(dev)) {
if (netif_queue_stopped(dev) &&
- time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
-
- printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
- dev->name);
+ (jiffies - dev->trans_start) > dev->watchdog_timeo) {
+ printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
dev->tx_timeout(dev);
}
if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
dev_hold(dev);
}
}
- netif_tx_unlock(dev);
+ spin_unlock(&dev->xmit_lock);
dev_put(dev);
}
/*
 * NOTE(review): hunk wrapping __netdev_watchdog_up() in an explicit
 * xmit_lock BH-disabled critical section — the '+' lines suggest the
 * newer code took this lock elsewhere (inside the tx-lock helpers);
 * verify against the full file before applying.
 */
static void dev_watchdog_up(struct net_device *dev)
{
+ spin_lock_bh(&dev->xmit_lock);
__netdev_watchdog_up(dev);
+ spin_unlock_bh(&dev->xmit_lock);
}
/*
 * NOTE(review): hunk swapping the lock helpers (netif_tx_lock_bh ->
 * spin_lock_bh on dev->xmit_lock) and the refcount drop (dev_put ->
 * __dev_put) when a pending watchdog timer is cancelled.  The
 * dev_put/__dev_put difference is a real semantic change in the patch,
 * not a rename — it belongs to the older API being restored.
 */
static void dev_watchdog_down(struct net_device *dev)
{
- netif_tx_lock_bh(dev);
+ spin_lock_bh(&dev->xmit_lock);
if (del_timer(&dev->watchdog_timer))
- dev_put(dev);
+ __dev_put(dev);
- netif_tx_unlock_bh(dev);
+ spin_unlock_bh(&dev->xmit_lock);
}
void netif_carrier_on(struct net_device *dev)
size = QDISC_ALIGN(sizeof(*sch));
size += ops->priv_size + (QDISC_ALIGNTO - 1);
- p = kzalloc(size, GFP_KERNEL);
+ p = kmalloc(size, GFP_KERNEL);
if (!p)
goto errout;
+ memset(p, 0, size);
sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
sch->padded = (char *) sch - (char *) p;
dev_watchdog_down(dev);
- /* Wait for outstanding dev_queue_xmit calls. */
- synchronize_rcu();
-
- /* Wait for outstanding qdisc_run calls. */
- while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
+ while (test_bit(__LINK_STATE_SCHED, &dev->state))
yield();
- if (dev->gso_skb) {
- kfree_skb(dev->gso_skb);
- dev->gso_skb = NULL;
- }
+ spin_unlock_wait(&dev->xmit_lock);
}
void dev_init_scheduler(struct net_device *dev)
/*
 * NOTE(review): export-list hunk — adds EXPORT_SYMBOL(qdisc_restart),
 * matching the hunk above that drops 'static inline' from
 * qdisc_restart() so the symbol is visible to modules.
 */
EXPORT_SYMBOL(qdisc_alloc);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
+EXPORT_SYMBOL(qdisc_restart);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);