#include "iseries_veth.h"
-extern struct vio_dev *iSeries_veth_dev;
-
MODULE_AUTHOR("Kyle Lucke <klucke@us.ibm.com>");
MODULE_DESCRIPTION("iSeries Virtual ethernet driver");
MODULE_LICENSE("GPL");
int token;
unsigned long in_use;
struct sk_buff *skb;
+ struct device *dev;
};
struct veth_lpar_connection {
};
struct veth_port {
+ struct device *dev;
struct net_device_stats stats;
u64 mac_addr;
HvLpIndexMap lpar_map;
if (cnx->msgs)
for (i = 0; i < VETH_NUMBUFFERS; ++i)
veth_recycle_msg(cnx, cnx->msgs + i);
+ spin_unlock_irq(&cnx->lock);
+ veth_flush_pending(cnx);
+ spin_lock_irq(&cnx->lock);
+ if (cnx->state & VETH_STATE_RESET)
+ goto restart;
}
if (cnx->state & VETH_STATE_SHUTDOWN)
return -EOPNOTSUPP;
}
-struct net_device * __init veth_probe_one(int vlan)
+/* Transmit-timeout handler (installed as dev->tx_timeout): invoked by
+ * the netdev watchdog once watchdog_timeo expires with the queue
+ * stopped. Marks every connection this port is still pending on for
+ * reset and kicks its state machine so the link is renegotiated.
+ * Acquires port->pending_gate with IRQs saved, and each cnx->lock
+ * nested inside it. */
+static void veth_tx_timeout(struct net_device *dev)
+{
+ struct veth_port *port = (struct veth_port *)dev->priv;
+ struct net_device_stats *stats = &port->stats;
+ unsigned long flags;
+ int i;
+
+ stats->tx_errors++;
+
+ spin_lock_irqsave(&port->pending_gate, flags);
+
+ printk(KERN_WARNING "%s: Tx timeout! Resetting lp connections: %08x\n",
+ dev->name, port->pending_lpmask);
+
+ /* If we've timed out the queue must be stopped, which should
+ * only ever happen when there is a pending packet. */
+ WARN_ON(! port->pending_lpmask);
+
+ for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
+ struct veth_lpar_connection *cnx = veth_cnx[i];
+
+ if (! (port->pending_lpmask & (1<<i)))
+ continue;
+
+ /* If we're pending on it, we must be connected to it,
+ * so we should certainly have a structure for it. */
+ BUG_ON(! cnx);
+
+ /* Theoretically we could be kicking a connection
+ * which doesn't deserve it, but in practice if we've
+ * had a Tx timeout, the pending_lpmask will have
+ * exactly one bit set - the connection causing the
+ * problem. */
+ spin_lock(&cnx->lock);
+ cnx->state |= VETH_STATE_RESET;
+ veth_kick_statemachine(cnx);
+ spin_unlock(&cnx->lock);
+ }
+
+ spin_unlock_irqrestore(&port->pending_gate, flags);
+}
+
+static struct net_device * __init veth_probe_one(int vlan, struct device *vdev)
{
struct net_device *dev;
struct veth_port *port;
if (map & (0x8000 >> vlan))
port->lpar_map |= (1 << i);
}
+ port->dev = vdev;
dev->dev_addr[0] = 0x02;
dev->dev_addr[1] = 0x01;
dev->set_multicast_list = veth_set_multicast_list;
dev->do_ioctl = veth_ioctl;
+ dev->watchdog_timeo = 2 * (VETH_ACKTIMEOUT * HZ / 1000000);
+ dev->tx_timeout = veth_tx_timeout;
+
+ SET_NETDEV_DEV(dev, vdev);
+
rc = register_netdev(dev);
if (rc != 0) {
veth_printk(KERN_ERR,
}
dma_length = skb->len;
- dma_address = vio_map_single(iSeries_veth_dev, skb->data,
+ dma_address = dma_map_single(port->dev, skb->data,
dma_length, DMA_TO_DEVICE);
if (dma_mapping_error(dma_address))
/* Is it really necessary to check the length and address
* fields of the first entry here? */
msg->skb = skb;
+ msg->dev = port->dev;
msg->data.addr[0] = dma_address;
msg->data.len[0] = dma_length;
msg->data.eofmask = 1 << VETH_EOF_SHIFT;
int rc;
for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
- struct sk_buff *clone;
-
- if (! lpmask & (1<<i))
- continue;
-
- clone = skb_clone(skb, GFP_ATOMIC);
- if (! clone) {
- veth_error("%s: skb_clone failed %p\n",
- dev->name, skb);
+ if ((lpmask & (1 << i)) == 0)
continue;
- }
- rc = veth_transmit_to_one(clone, i, dev);
+ rc = veth_transmit_to_one(skb_get(skb), i, dev);
if (! rc)
lpmask &= ~(1<<i);
}
lpmask = port->lpar_map;
}
+ spin_lock_irqsave(&port->pending_gate, flags);
+
lpmask = veth_transmit_to_many(skb, lpmask, dev);
if (! lpmask) {
dev_kfree_skb(skb);
} else {
- spin_lock_irqsave(&port->pending_gate, flags);
if (port->pending_skb) {
veth_error("%s: Tx while skb was pending!\n",
dev->name);
dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&port->pending_gate, flags);
return 1;
}
port->pending_skb = skb;
port->pending_lpmask = lpmask;
netif_stop_queue(dev);
-
- spin_unlock_irqrestore(&port->pending_gate, flags);
}
+ spin_unlock_irqrestore(&port->pending_gate, flags);
+
return 0;
}
dma_address = msg->data.addr[0];
dma_length = msg->data.len[0];
- vio_unmap_single(iSeries_veth_dev, dma_address, dma_length,
+ dma_unmap_single(msg->dev, dma_address, dma_length,
DMA_TO_DEVICE);
if (msg->skb) {
if (! port->pending_lpmask) {
dev_kfree_skb_any(port->pending_skb);
port->pending_skb = NULL;
- netif_start_queue(dev);
+ netif_wake_queue(dev);
}
}
spin_unlock_irqrestore(&port->pending_gate, flags);
spin_unlock_irqrestore(&cnx->lock, flags);
}
+/* vio_driver remove callback: tears down the net_device that
+ * veth_probe() registered for this unit address, if one exists.
+ * veth_dev[i] is cleared before unregister_netdev()/free_netdev()
+ * so the slot is never seen half-destroyed. Always returns 0. */
+static int veth_remove(struct vio_dev *vdev)
+{
+ int i = vdev->unit_address;
+ struct net_device *dev;
+
+ dev = veth_dev[i];
+ if (dev != NULL) {
+ veth_dev[i] = NULL;
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ return 0;
+}
+
+/* vio_driver probe callback: creates and registers a net_device for
+ * the vlan at vdev->unit_address via veth_probe_one(), records it in
+ * veth_dev[], then kicks the state machine on every existing
+ * connection to commence link negotiation.
+ *
+ * NOTE(review): failure returns 1 rather than a negative errno —
+ * confirm the vio bus treats any non-zero probe return as failure.
+ * The veth_remove() call on the failure path is a no-op here since
+ * veth_dev[i] was never set. */
+static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+{
+ int i = vdev->unit_address;
+ struct net_device *dev;
+
+ dev = veth_probe_one(i, &vdev->dev);
+ if (dev == NULL) {
+ veth_remove(vdev);
+ return 1;
+ }
+ veth_dev[i] = dev;
+
+ /* Start the state machine on each connection, to commence
+ * link negotiation */
+ for (i = 0; i < HVMAXARCHITECTEDLPS; i++)
+ if (veth_cnx[i])
+ veth_kick_statemachine(veth_cnx[i]);
+
+ return 0;
+}
+
+/**
+ * veth_device_table: used by vio.c to match probed vio devices against
+ * the drivers that support them. Terminated by the all-NULL sentinel
+ * entry; also exported for module autoloading via MODULE_DEVICE_TABLE.
+ */
+static struct vio_device_id veth_device_table[] __devinitdata = {
+ { "vlan", "" },
+ { NULL, NULL }
+};
+MODULE_DEVICE_TABLE(vio, veth_device_table);
+
+/* Driver operations handed to the vio bus; registered from
+ * veth_module_init() and unregistered in module cleanup. */
+static struct vio_driver veth_driver = {
+ .name = "iseries_veth",
+ .id_table = veth_device_table,
+ .probe = veth_probe,
+ .remove = veth_remove
+};
+
/*
* Module initialization/cleanup
*/
{
int i;
+ vio_unregister_driver(&veth_driver);
+
for (i = 0; i < HVMAXARCHITECTEDLPS; ++i)
veth_destroy_connection(i);
HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
-
- for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; ++i) {
- struct net_device *dev = veth_dev[i];
-
- if (! dev)
- continue;
-
- veth_dev[i] = NULL;
- unregister_netdev(dev);
- free_netdev(dev);
- }
}
module_exit(veth_module_cleanup);
int __init veth_module_init(void)
{
- HvLpIndexMap vlan_map = HvLpConfig_getVirtualLanIndexMap();
int i;
int rc;
}
}
- for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; ++i) {
- struct net_device *dev;
-
- if (! (vlan_map & (0x8000 >> i)))
- continue;
-
- dev = veth_probe_one(i);
-
- if (! dev) {
- veth_module_cleanup();
- return rc;
- }
-
- veth_dev[i] = dev;
- }
-
HvLpEvent_registerHandler(HvLpEvent_Type_VirtualLan,
&veth_handle_event);
- /* Start the state machine on each connection, to commence
- * link negotiation */
- for (i = 0; i < HVMAXARCHITECTEDLPS; i++)
- if (veth_cnx[i])
- veth_kick_statemachine(veth_cnx[i]);
-
- return 0;
+ return vio_register_driver(&veth_driver);
}
module_init(veth_module_init);