if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
return skb;
- vport = (struct vport *)rcu_dereference(skb->dev->br_port);
+ vport = netdev_get_vport(skb->dev);
netdev_port_receive(vport, skb);
/* If we are using the vport stats layer initialize it to the current
* values so we are roughly consistent with the device stats. */
if (USE_VPORT_STATS) {
- struct odp_vport_stats stats;
+ struct rtnl_link_stats64 stats;
err = netdev_get_stats(vport, &stats);
if (!err)
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
int err;
- rcu_assign_pointer(netdev_vport->dev->br_port,
- (struct net_bridge_port *)vport);
err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
- NULL);
+ vport);
if (err)
return err;
dev_set_promiscuity(netdev_vport->dev, 1);
dev_disable_lro(netdev_vport->dev);
+ netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
return 0;
}
{
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+ netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
netdev_rx_handler_unregister(netdev_vport->dev);
- rcu_assign_pointer(netdev_vport->dev->br_port, NULL);
dev_set_promiscuity(netdev_vport->dev, -1);
return 0;
return &netdev_vport->dev->NETDEV_DEV_MEMBER.kobj;
}
/* Fills 'stats' with the statistics of the network device underlying 'vport'.
 * Always returns 0.
 *
 * Updated to take a struct rtnl_link_stats64 and let dev_get_stats() (whose
 * 2.6.36+ signature fills a caller-supplied rtnl_link_stats64) populate it
 * directly, replacing the old field-by-field copy from struct
 * net_device_stats into the since-removed struct odp_vport_stats. */
-int netdev_get_stats(const struct vport *vport, struct odp_vport_stats *stats)
+int netdev_get_stats(const struct vport *vport, struct rtnl_link_stats64 *stats)
{
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
- const struct net_device_stats *netdev_stats;
-
- netdev_stats = dev_get_stats(netdev_vport->dev);
-
- stats->rx_bytes = netdev_stats->rx_bytes;
- stats->rx_packets = netdev_stats->rx_packets;
- stats->tx_bytes = netdev_stats->tx_bytes;
- stats->tx_packets = netdev_stats->tx_packets;
- stats->rx_dropped = netdev_stats->rx_dropped;
- stats->rx_errors = netdev_stats->rx_errors;
- stats->rx_frame_err = netdev_stats->rx_frame_errors;
- stats->rx_over_err = netdev_stats->rx_over_errors;
- stats->rx_crc_err = netdev_stats->rx_crc_errors;
- stats->tx_dropped = netdev_stats->tx_dropped;
- stats->tx_errors = netdev_stats->tx_errors;
- stats->collisions = netdev_stats->collisions;
-
+ dev_get_stats(netdev_vport->dev, stats);
return 0;
}
/* Returns the vport attached to 'dev', or NULL if this device is not
 * attached to a datapath.
 *
 * NOTE(review): callers presumably hold rcu_read_lock() (or RTNL), as
 * required for rcu_dereference() here — confirm at the call sites. */
struct vport *netdev_get_vport(struct net_device *dev)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
+ /* XXX: The bridge code may have registered the data.
+ * So check that the handler pointer is the datapath's.
+ * Once the merge is done and IFF_OVS_DATAPATH stops
+ * being the same value as IFF_BRIDGE_PORT the check can
+ * simply be netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH. */
+ if (rcu_dereference(dev->rx_handler) != netdev_frame_hook)
+ return NULL;
+ /* rx_handler_data was registered as the vport pointer in attach. */
+ return (struct vport *)rcu_dereference(dev->rx_handler_data);
+#else
/* Pre-2.6.36 kernels: the vport is stashed in the bridge-port slot. */
return (struct vport *)rcu_dereference(dev->br_port);
+#endif
}
/* Operations table for vports backed by a real network device.
 * NOTE(review): only .send is visible in this hunk; the other members
 * (create/destroy/attach/detach/etc.) are presumably elided by the diff
 * context — verify against the full file. */
struct vport_ops netdev_vport_ops = {
.send = netdev_send,
};
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
/*
- * Open vSwitch cannot safely coexist with the Linux bridge module on any
- * released version of Linux, because there is only a single bridge hook
- * function and only a single br_port member in struct net_device.
+ * In kernels earlier than 2.6.36, Open vSwitch cannot safely coexist with
+ * the Linux bridge module on any released version of Linux, because there
+ * is only a single bridge hook function and only a single br_port member
+ * in struct net_device.
 *
 * Declaring and exporting this symbol enforces mutual exclusion. The bridge
 * module also exports the same symbol, so the module loader will refuse to
 * load both modules at the same time.
 */
/* Tentative definition: never assigned or called here; it exists solely so
 * that the duplicate export collides with the bridge module's. */
typeof(br_should_route_hook) br_should_route_hook;
EXPORT_SYMBOL(br_should_route_hook);
+#endif