The idea is the following:
- enqueue and dequeue are serialized via the top-level device
spinlock dev->queue_lock.
- - tree walking is protected by read_lock_bh(qdisc_tree_lock)
+ - tree walking is protected by read_lock(qdisc_tree_lock)
and this lock is used only in process context.
- - updates to tree are made under rtnl semaphore or
- from softirq context (__qdisc_destroy rcu-callback)
- hence this lock needs local bh disabling.
+ - updates to the tree are made only under the rtnl semaphore,
+ hence this lock may be taken without local BH disabling.
qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
*/
void qdisc_lock_tree(struct net_device *dev)
{
- write_lock_bh(&qdisc_tree_lock);
+ write_lock(&qdisc_tree_lock);
spin_lock_bh(&dev->queue_lock);
}
void qdisc_unlock_tree(struct net_device *dev)
{
spin_unlock_bh(&dev->queue_lock);
- write_unlock_bh(&qdisc_tree_lock);
+ write_unlock(&qdisc_tree_lock);
}
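For illustration, a tree update from process context would go through these helpers so that qdisc_tree_lock is taken before dev->queue_lock, as required above. A minimal sketch, loosely modeled on dev_graft_qdisc(); example_graft() itself is hypothetical:

/* Sketch: swap the sleeping qdisc under both locks (rtnl held). */
static struct Qdisc *example_graft(struct net_device *dev, struct Qdisc *new_q)
{
	struct Qdisc *old;

	qdisc_lock_tree(dev);		/* qdisc_tree_lock, then queue_lock */
	old = dev->qdisc_sleeping;
	dev->qdisc_sleeping = new_q;
	qdisc_unlock_tree(dev);		/* released in reverse order */

	return old;			/* caller disposes of the old qdisc */
}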
/*
dev->queue_lock serializes queue accesses for this device
AND dev->qdisc pointer itself.
- netif_tx_lock serializes accesses to device driver.
+ dev->xmit_lock serializes accesses to device driver.
- dev->queue_lock and netif_tx_lock are mutually exclusive,
+ dev->queue_lock and dev->xmit_lock are mutually exclusive,
if one is grabbed, the other must be free.
*/
NOTE: Called under dev->queue_lock with locally disabled BH.
*/
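The mutual exclusion above shapes qdisc_restart() below: the queue lock is dropped before the driver lock is tried, so the two are never held together. A condensed sketch of that hand-off, with the owner bookkeeping and requeue paths omitted (example_xmit_one() is hypothetical):

static void example_xmit_one(struct net_device *dev, struct Qdisc *q)
{
	struct sk_buff *skb;

	spin_lock(&dev->queue_lock);		/* queue and dev->qdisc access */
	skb = q->dequeue(q);
	spin_unlock(&dev->queue_lock);		/* never hold both locks at once */

	if (skb && spin_trylock(&dev->xmit_lock)) {
		dev->hard_start_xmit(skb, dev);	/* driver access; real code checks
						 * the NETDEV_TX_* return value */
		spin_unlock(&dev->xmit_lock);
	}
	/* qdisc_restart() retakes dev->queue_lock here before returning */
}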
-static inline int qdisc_restart(struct net_device *dev)
+int qdisc_restart(struct net_device *dev)
{
struct Qdisc *q = dev->qdisc;
struct sk_buff *skb;
/* Dequeue packet */
- if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
+ if ((skb = q->dequeue(q)) != NULL) {
unsigned nolock = (dev->features & NETIF_F_LLTX);
-
- dev->gso_skb = NULL;
-
/*
* When the driver has LLTX set it does its own locking
* in start_xmit, so there is no need to add overhead by taking
* dev->xmit_lock here as well; if the driver cannot accept the
* packet, it will be requeued.
*/
if (!nolock) {
- if (!netif_tx_trylock(dev)) {
+ if (!spin_trylock(&dev->xmit_lock)) {
collision:
/* So, someone grabbed the driver. */
__get_cpu_var(netdev_rx_stat).cpu_collision++;
goto requeue;
}
+ /* Remember that the driver is grabbed by us. */
+ dev->xmit_lock_owner = smp_processor_id();
}
{
/* And release queue */
spin_unlock(&dev->queue_lock);
if (!netif_queue_stopped(dev)) {
int ret;
+ if (netdev_nit)
+ dev_queue_xmit_nit(skb, dev);
- ret = dev_hard_start_xmit(skb, dev);
+ ret = dev->hard_start_xmit(skb, dev);
if (ret == NETDEV_TX_OK) {
if (!nolock) {
- netif_tx_unlock(dev);
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->xmit_lock);
}
spin_lock(&dev->queue_lock);
return -1;
/* NETDEV_TX_BUSY - we need to requeue */
/* Release the driver */
if (!nolock) {
- netif_tx_unlock(dev);
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->xmit_lock);
}
spin_lock(&dev->queue_lock);
q = dev->qdisc;
*/
requeue:
- if (skb->next)
- dev->gso_skb = skb;
- else
- q->ops->requeue(skb, q);
+ q->ops->requeue(skb, q);
netif_schedule(dev);
return 1;
}
return q->q.qlen;
}
-void __qdisc_run(struct net_device *dev)
-{
- if (unlikely(dev->qdisc == &noop_qdisc))
- goto out;
-
- while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
- /* NOTHING */;
-
-out:
- clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
-}
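With __qdisc_run() removed, callers drive the queue by looping on the now-exported qdisc_restart(), which returns a negative value while more packets may be sent. A sketch matching the removed loop, in the style of the pre-GSO inline qdisc_run():

/* Sketch: drain until the queue empties or the driver throttles.
 * Called under dev->queue_lock with BH disabled, as noted above.
 */
static inline void example_qdisc_run(struct net_device *dev)
{
	while (qdisc_restart(dev) < 0 &&
	       !netif_queue_stopped(dev))
		/* NOTHING */;
}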
-
static void dev_watchdog(unsigned long arg)
{
struct net_device *dev = (struct net_device *)arg;
- netif_tx_lock(dev);
+ spin_lock(&dev->xmit_lock);
if (dev->qdisc != &noop_qdisc) {
if (netif_device_present(dev) &&
netif_running(dev) &&
netif_carrier_ok(dev)) {
if (netif_queue_stopped(dev) &&
- time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
-
- printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
- dev->name);
+ (jiffies - dev->trans_start) > dev->watchdog_timeo) {
+ printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
dev->tx_timeout(dev);
}
if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
dev_hold(dev);
}
}
- netif_tx_unlock(dev);
+ spin_unlock(&dev->xmit_lock);
dev_put(dev);
}
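A driver opts into this watchdog by setting dev->tx_timeout and dev->watchdog_timeo before register_netdev(); the timer then invokes dev->tx_timeout() once the queue has been stopped longer than the timeout. A minimal sketch; my_tx_timeout(), my_setup() and the reset step are hypothetical:

static void my_tx_timeout(struct net_device *dev)
{
	/* reset the hardware TX path here, then let traffic flow again */
	netif_wake_queue(dev);
}

static void my_setup(struct net_device *dev)
{
	dev->tx_timeout = my_tx_timeout;
	dev->watchdog_timeo = 5 * HZ;	/* declare TX hung after five seconds */
}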
static void dev_watchdog_up(struct net_device *dev)
{
- netif_tx_lock_bh(dev);
+ spin_lock_bh(&dev->xmit_lock);
__netdev_watchdog_up(dev);
- netif_tx_unlock_bh(dev);
+ spin_unlock_bh(&dev->xmit_lock);
}
static void dev_watchdog_down(struct net_device *dev)
{
- netif_tx_lock_bh(dev);
+ spin_lock_bh(&dev->xmit_lock);
if (del_timer(&dev->watchdog_timer))
- dev_put(dev);
- netif_tx_unlock_bh(dev);
+ __dev_put(dev);
+ spin_unlock_bh(&dev->xmit_lock);
}
void netif_carrier_on(struct net_device *dev)
static void __qdisc_destroy(struct rcu_head *head)
{
struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
- struct Qdisc_ops *ops = qdisc->ops;
-
-#ifdef CONFIG_NET_ESTIMATOR
- gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
-#endif
- write_lock(&qdisc_tree_lock);
- if (ops->reset)
- ops->reset(qdisc);
- if (ops->destroy)
- ops->destroy(qdisc);
- write_unlock(&qdisc_tree_lock);
- module_put(ops->owner);
-
- dev_put(qdisc->dev);
kfree((char *) qdisc - qdisc->padded);
}
void qdisc_destroy(struct Qdisc *qdisc)
{
- struct list_head cql = LIST_HEAD_INIT(cql);
- struct Qdisc *cq, *q, *n;
+ struct Qdisc_ops *ops = qdisc->ops;
if (qdisc->flags & TCQ_F_BUILTIN ||
- !atomic_dec_and_test(&qdisc->refcnt))
+ !atomic_dec_and_test(&qdisc->refcnt))
return;
- if (!list_empty(&qdisc->list)) {
- if (qdisc->ops->cl_ops == NULL)
- list_del(&qdisc->list);
- else
- list_move(&qdisc->list, &cql);
- }
-
- /* unlink inner qdiscs from dev->qdisc_list immediately */
- list_for_each_entry(cq, &cql, list)
- list_for_each_entry_safe(q, n, &qdisc->dev->qdisc_list, list)
- if (TC_H_MAJ(q->parent) == TC_H_MAJ(cq->handle)) {
- if (q->ops->cl_ops == NULL)
- list_del_init(&q->list);
- else
- list_move_tail(&q->list, &cql);
- }
- list_for_each_entry_safe(cq, n, &cql, list)
- list_del_init(&cq->list);
+ list_del(&qdisc->list);
+#ifdef CONFIG_NET_ESTIMATOR
+ gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
+#endif
+ if (ops->reset)
+ ops->reset(qdisc);
+ if (ops->destroy)
+ ops->destroy(qdisc);
+ module_put(ops->owner);
+ dev_put(qdisc->dev);
call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}
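The final kfree() is deferred through call_rcu() so that a reader dereferencing dev->qdisc without qdisc_tree_lock cannot have the memory freed underneath it, mirroring the lockless read in dev_queue_xmit(). A sketch of such a read side (example_qdisc_qlen() is hypothetical):

static unsigned int example_qdisc_qlen(struct net_device *dev)
{
	struct Qdisc *q;
	unsigned int qlen;

	rcu_read_lock();
	q = rcu_dereference(dev->qdisc);	/* may be stale, but not freed */
	qlen = q->q.qlen;			/* memory stays valid until all
						 * such readers have finished */
	rcu_read_unlock();

	return qlen;
}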
printk(KERN_INFO "%s: activation failed\n", dev->name);
return;
}
- write_lock_bh(&qdisc_tree_lock);
+ write_lock(&qdisc_tree_lock);
list_add_tail(&qdisc->list, &dev->qdisc_list);
- write_unlock_bh(&qdisc_tree_lock);
+ write_unlock(&qdisc_tree_lock);
} else {
qdisc = &noqueue_qdisc;
}
- write_lock_bh(&qdisc_tree_lock);
+ write_lock(&qdisc_tree_lock);
dev->qdisc_sleeping = qdisc;
- write_unlock_bh(&qdisc_tree_lock);
+ write_unlock(&qdisc_tree_lock);
}
if (!netif_carrier_ok(dev))
dev_watchdog_down(dev);
- /* Wait for outstanding dev_queue_xmit calls. */
- synchronize_rcu();
-
- /* Wait for outstanding qdisc_run calls. */
- while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
+ while (test_bit(__LINK_STATE_SCHED, &dev->state))
yield();
- if (dev->gso_skb) {
- kfree_skb(dev->gso_skb);
- dev->gso_skb = NULL;
- }
+ spin_unlock_wait(&dev->xmit_lock);
}
void dev_init_scheduler(struct net_device *dev)
EXPORT_SYMBOL(qdisc_alloc);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
+EXPORT_SYMBOL(qdisc_restart);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);