diff --git a/datapath/dp_dev.c b/datapath/dp_dev.c
index 848a27b28..9caf65af7 100644
--- a/datapath/dp_dev.c
+++ b/datapath/dp_dev.c
@@ -17,6 +17,13 @@
 #include "datapath.h"
 #include "dp_dev.h"
 
+struct pcpu_lstats {
+	unsigned long rx_packets;
+	unsigned long rx_bytes;
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+};
+
 struct datapath *dp_dev_get_dp(struct net_device *netdev)
 {
 	return dp_dev_priv(netdev)->dp;
@@ -26,12 +33,27 @@ EXPORT_SYMBOL(dp_dev_get_dp);
 static struct net_device_stats *dp_dev_get_stats(struct net_device *netdev)
 {
 	struct dp_dev *dp_dev = dp_dev_priv(netdev);
-	return &dp_dev->stats;
+	struct net_device_stats *stats;
+	int i;
+
+	stats = &dp_dev->stats;
+	memset(stats, 0, sizeof *stats);
+	for_each_possible_cpu(i) {
+		const struct pcpu_lstats *lb_stats;
+
+		lb_stats = per_cpu_ptr(dp_dev->lstats, i);
+		stats->rx_bytes += lb_stats->rx_bytes;
+		stats->rx_packets += lb_stats->rx_packets;
+		stats->tx_bytes += lb_stats->tx_bytes;
+		stats->tx_packets += lb_stats->tx_packets;
+	}
+	return stats;
 }
 
 int dp_dev_recv(struct net_device *netdev, struct sk_buff *skb)
 {
 	struct dp_dev *dp_dev = dp_dev_priv(netdev);
+	struct pcpu_lstats *lb_stats;
 	int len;
 	len = skb->len;
 	skb->pkt_type = PACKET_HOST;
@@ -41,8 +63,9 @@ int dp_dev_recv(struct net_device *netdev, struct sk_buff *skb)
 	else
 		netif_rx_ni(skb);
 	netdev->last_rx = jiffies;
-	dp_dev->stats.rx_packets++;
-	dp_dev->stats.rx_bytes += len;
+	lb_stats = per_cpu_ptr(dp_dev->lstats, smp_processor_id());
+	lb_stats->rx_packets++;
+	lb_stats->rx_bytes += len;
 	return len;
 }
 
@@ -56,58 +79,30 @@ static int dp_dev_mac_addr(struct net_device *dev, void *p)
 	return 0;
 }
 
+/* Not reentrant (because it is called with BHs disabled), but may be called
+ * simultaneously on different CPUs. */
 static int dp_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct dp_dev *dp_dev = dp_dev_priv(netdev);
+	struct pcpu_lstats *lb_stats;
 
-	/* By orphaning 'skb' we will screw up socket accounting slightly, but
-	 * the effect is limited to the device queue length.  If we don't
-	 * do this, then the sk_buff will be destructed eventually, but it is
-	 * harder to predict when. */
-	skb_orphan(skb);
-
-	/* We are going to modify 'skb', by sticking it on &dp_dev->xmit_queue,
-	 * so we need to have our own clone.  (At any rate, fwd_port_input()
-	 * will need its own clone, so there's no benefit to queuing any other
-	 * way.) */
+	/* dp_process_received_packet() needs its own clone. */
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (!skb)
 		return 0;
 
-	dp_dev->stats.tx_packets++;
-	dp_dev->stats.tx_bytes += skb->len;
-
-	if (skb_queue_len(&dp_dev->xmit_queue) >= netdev->tx_queue_len) {
-		/* Queue overflow.  Stop transmitter. */
-		netif_stop_queue(netdev);
-
-		/* We won't see all dropped packets individually, so overrun
-		 * error is appropriate. */
-		dp_dev->stats.tx_fifo_errors++;
-	}
-	skb_queue_tail(&dp_dev->xmit_queue, skb);
-	netdev->trans_start = jiffies;
+	lb_stats = per_cpu_ptr(dp_dev->lstats, smp_processor_id());
+	lb_stats->tx_packets++;
+	lb_stats->tx_bytes += skb->len;
 
-	schedule_work(&dp_dev->xmit_work);
+	skb_reset_mac_header(skb);
+	rcu_read_lock_bh();
+	dp_process_received_packet(skb, dp_dev->dp->ports[dp_dev->port_no]);
+	rcu_read_unlock_bh();
 
 	return 0;
 }
 
-static void dp_dev_do_xmit(struct work_struct *work)
-{
-	struct dp_dev *dp_dev = container_of(work, struct dp_dev, xmit_work);
-	struct datapath *dp = dp_dev->dp;
-	struct sk_buff *skb;
-
-	while ((skb = skb_dequeue(&dp_dev->xmit_queue)) != NULL) {
-		skb_reset_mac_header(skb);
-		rcu_read_lock_bh();
-		dp_process_received_packet(skb, dp->ports[dp_dev->port_no]);
-		rcu_read_unlock_bh();
-	}
-	netif_wake_queue(dp_dev->dev);
-}
-
 static int dp_dev_open(struct net_device *netdev)
 {
 	netif_start_queue(netdev);
@@ -135,6 +130,25 @@ static struct ethtool_ops dp_ethtool_ops = {
 	.get_tso = ethtool_op_get_tso,
 };
 
+static int dp_dev_init(struct net_device *netdev)
+{
+	struct dp_dev *dp_dev = dp_dev_priv(netdev);
+
+	dp_dev->lstats = alloc_percpu(struct pcpu_lstats);
+	if (!dp_dev->lstats)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void dp_dev_free(struct net_device *netdev)
+{
+	struct dp_dev *dp_dev = dp_dev_priv(netdev);
+
+	free_percpu(dp_dev->lstats);
+	free_netdev(netdev);
+}
+
 static void
 do_setup(struct net_device *netdev)
 {
@@ -146,10 +160,13 @@ do_setup(struct net_device *netdev)
 	netdev->open = dp_dev_open;
 	SET_ETHTOOL_OPS(netdev, &dp_ethtool_ops);
 	netdev->stop = dp_dev_stop;
-	netdev->tx_queue_len = 100;
+	netdev->tx_queue_len = 0;
 	netdev->set_mac_address = dp_dev_mac_addr;
+	netdev->init = dp_dev_init;
+	netdev->destructor = dp_dev_free;
 
 	netdev->flags = IFF_BROADCAST | IFF_MULTICAST;
+	netdev->features = NETIF_F_LLTX; /* XXX other features? */
 
 	random_ether_addr(netdev->dev_addr);
 
@@ -195,19 +212,12 @@ struct net_device *dp_dev_create(struct datapath *dp, const char *dp_name, int p
 	dp_dev->dp = dp;
 	dp_dev->port_no = port_no;
 	dp_dev->dev = netdev;
-	skb_queue_head_init(&dp_dev->xmit_queue);
-	INIT_WORK(&dp_dev->xmit_work, dp_dev_do_xmit);
 	return netdev;
 }
 
 /* Called with RTNL lock and dp_mutex.*/
 void dp_dev_destroy(struct net_device *netdev)
 {
-	struct dp_dev *dp_dev = dp_dev_priv(netdev);
-
-	netif_tx_disable(netdev);
-	synchronize_net();
-	skb_queue_purge(&dp_dev->xmit_queue);
 	unregister_netdevice(netdev);
 }
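
The hunks above replace the old deferred-transmit scheme (queue the skb on
xmit_queue, process it later from a workqueue) with a synchronous path:
dp_dev_xmit() hands the packet straight to dp_process_received_packet() under
rcu_read_lock_bh().  With no queue left to overflow, tx_queue_len drops to 0
and NETIF_F_LLTX tells the core not to take the transmit lock around the hard
start routine.  In turn, the shared net_device_stats counters become per-CPU
pcpu_lstats counters, so concurrent senders on different CPUs never contend
on a lock or a shared cache line; dp_dev_get_stats() folds the per-CPU copies
together on demand, essentially the pattern the loopback driver uses.

The following minimal, self-contained module is a sketch of that per-CPU
counter pattern against a 2.6.2x-era kernel API.  It is not part of this
patch; the module name and the count_tx()/total_tx_packets() helpers are
hypothetical, for illustration only:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/percpu.h>
	#include <linux/cpumask.h>
	#include <linux/smp.h>

	struct pcpu_lstats {
		unsigned long tx_packets;
		unsigned long tx_bytes;
	};

	static struct pcpu_lstats *lstats;

	/* Hot path: bump only this CPU's copy; no lock, no shared cache
	 * line.  dp_dev_xmit() may use smp_processor_id() directly because
	 * BHs are already disabled there; a standalone caller pins the CPU
	 * with get_cpu()/put_cpu() instead. */
	static void count_tx(unsigned int len)
	{
		struct pcpu_lstats *s = per_cpu_ptr(lstats, get_cpu());

		s->tx_packets++;
		s->tx_bytes += len;
		put_cpu();
	}

	/* Slow path, as in dp_dev_get_stats(): fold every CPU's counters
	 * into one total.  Slightly stale values are acceptable for
	 * ifconfig-style statistics. */
	static unsigned long total_tx_packets(void)
	{
		unsigned long sum = 0;
		int i;

		for_each_possible_cpu(i)
			sum += per_cpu_ptr(lstats, i)->tx_packets;
		return sum;
	}

	static int __init pcpu_demo_init(void)
	{
		/* Mirrors dp_dev_init(): one pcpu_lstats per possible CPU. */
		lstats = alloc_percpu(struct pcpu_lstats);
		if (!lstats)
			return -ENOMEM;

		count_tx(60);
		printk(KERN_INFO "pcpu_demo: %lu packets\n", total_tx_packets());
		return 0;
	}

	static void __exit pcpu_demo_exit(void)
	{
		/* Mirrors dp_dev_free(). */
		free_percpu(lstats);
	}

	module_init(pcpu_demo_init);
	module_exit(pcpu_demo_exit);
	MODULE_LICENSE("GPL");

One caveat, recorded by the "Not reentrant" comment above dp_dev_xmit(): the
counters are unlocked, so the transmit path relies on BH-disabled context to
keep two updates on the same CPU from interleaving.  The pattern is safe
across CPUs but not against reentry on a single CPU.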