/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dpif-netdev.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "unaligned.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * We need to reserve extra space in the mbufs so that we can align their
 * DMA addresses to 4 KB.
 */
#define MTU_TO_MAX_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu) (MTU_TO_MAX_LEN(mtu) + (512) + \
                        sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
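/* Worked example (illustrative): for the default ETHER_MTU of 1500,
 * MTU_TO_MAX_LEN(1500) = 1500 + 14 (Ethernet header) + 4 (CRC) = 1518 bytes
 * of frame data.  MBUF_SIZE(1500) then adds the 512 bytes of alignment
 * slack, sizeof(struct rte_mbuf), and RTE_PKTMBUF_HEADROOM (128 bytes in a
 * default DPDK build), so each mempool element ends up a little over 2 KB. */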
/* TODO: mempool size should be based on system resources. */
#define NB_MBUF (4096 * 64)
#define MP_CACHE_SZ (256 * 2)
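/* Back-of-the-envelope footprint (a sketch, assuming ~2.2 KB per element as
 * in the example above): NB_MBUF = 4096 * 64 = 262144 mbufs, so a single
 * pool consumes roughly half a gigabyte of hugepage memory. */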
#define NON_PMD_THREAD_TX_QUEUE 0

/* TODO: Needs per-NIC values for these constants. */
#define RX_PTHRESH 32 /* Default value of RX prefetch threshold reg. */
#define RX_HTHRESH 32 /* Default value of RX host threshold reg. */
#define RX_WTHRESH 16 /* Default value of RX write-back threshold reg. */

#define TX_PTHRESH 36 /* Default value of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /* Default value of TX host threshold reg. */
#define TX_WTHRESH 0  /* Default value of TX write-back threshold reg. */
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header split disabled. */
        .hw_ip_checksum = 0, /* IP checksum offload disabled. */
        .hw_vlan_filter = 0, /* VLAN filtering disabled. */
        .jumbo_frame    = 0, /* Jumbo frame support disabled. */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IPV4_TCP | ETH_RSS_IPV4 | ETH_RSS_IPV6,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
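/* ETH_MQ_RX_RSS asks the NIC to hash each packet on the IPv4/TCP/IPv6 fields
 * selected by 'rss_hf' above, spreading flows across the RX queues that
 * dpdk_eth_dev_init() configures. */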
static const struct rte_eth_rxconf rx_conf = {
    .rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
    },
};

static const struct rte_eth_txconf tx_conf = {
    .tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
    },
};
enum { MAX_RX_QUEUE_LEN = 64 };
enum { MAX_TX_QUEUE_LEN = 64 };
enum { DRAIN_TSC = 200000ULL };
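/* DRAIN_TSC is measured in TSC cycles; on a 2 GHz TSC (an assumption, the
 * actual rate is CPU-specific) 200000 cycles is about 100 microseconds,
 * which bounds how long a partially filled TX batch can sit in a queue
 * before being flushed. */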
static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct netdev_dpdk's. */
static struct list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = LIST_INITIALIZER(&dpdk_list);

static struct list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = LIST_INITIALIZER(&dpdk_mp_list);
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct dpdk_tx_queue {
    rte_spinlock_t tx_lock;
    int count;                  /* Packets currently queued in burst_pkts[]. */
    uint64_t tsc;               /* TSC cycle count when the batch started. */
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
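/* TX batching policy: dpdk_queue_pkt() appends to burst_pkts[] and hands the
 * whole batch to rte_eth_tx_burst() once it reaches MAX_TX_QUEUE_LEN packets
 * or once DRAIN_TSC cycles have passed since the first packet was queued. */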
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    struct dpdk_tx_queue tx_q[NR_QUEUE];
    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);
    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats_offset;
    struct netdev_stats stats;
    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;
    struct rte_eth_link link;
    int link_reset_cnt;
    struct list list_node OVS_GUARDED_BY(dpdk_mutex); /* In dpdk_list. */
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static int netdev_dpdk_construct(struct netdev *);
static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* TODO: use DPDK malloc for the whole of OVS; in fact, huge pages should be
 * used for the other segments (data, bss and text) as well. */
static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        ovs_abort(0, "%s", __func__);
    }
    return ptr;
}
static void
free_dpdk_buf(struct ofpbuf *b)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) b;

    rte_mempool_put(pkt->pool, pkt);
}
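/* Layout of each mempool element (a sketch; exact sizes depend on the DPDK
 * build).  The 'struct ofpbuf' metadata is overlaid on the head of the
 * element, which is why free_dpdk_buf() can cast an ofpbuf pointer straight
 * to an rte_mbuf pointer:
 *
 *   +---------------------+----------------------+--------------------+
 *   |   struct ofpbuf     | RTE_PKTMBUF_HEADROOM |    packet data     |
 *   | (rte_mbuf at head)  |                      |                    |
 *   +---------------------+----------------------+--------------------+
 *   ^m                    ^m->buf_addr           ^m->pkt.data
 */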
static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct ofpbuf);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct ofpbuf));

    memset(m, 0, mp->elt_size);

    /* Start of buffer is just after the ofpbuf metadata. */
    m->buf_addr = (char *) m + sizeof(struct ofpbuf);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof(struct ofpbuf);
    m->buf_len = (uint16_t) buf_len;

    /* Keep some headroom between start of buffer and data. */
    m->pkt.data = (char *) m->buf_addr
                  + RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);

    /* Init some constant fields. */
    m->type = RTE_MBUF_PKT;
    m->pool = mp;
    m->pkt.nb_segs = 1;
    m->pkt.in_port = 0xff;
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    ofpbuf_init_dpdk((struct ofpbuf *) m, m->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d", dmp->mtu);
    dmp->mp = rte_mempool_create(mp_name, NB_MBUF, MBUF_SIZE(mtu),
                                 MP_CACHE_SZ,
                                 sizeof(struct rte_pktmbuf_pool_private),
                                 rte_pktmbuf_pool_init, NULL,
                                 ovs_rte_pktmbuf_init, NULL,
                                 socket_id, 0);
    if (dmp->mp == NULL) {
        return NULL;
    }

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any DPDK API to destroy an mbuf mempool. */
    if (dmp->refcount == 0) {
        list_remove(&dmp->list_node);
        /* Destroy the mempool here. */
    }
#endif
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX)
                        ? "full-duplex" : "half-duplex");
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct ether_addr eth_addr;
    int diag;
    int i;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    diag = rte_eth_dev_configure(dev->port_id, NR_QUEUE, NR_QUEUE, &port_conf);
    if (diag) {
        VLOG_ERR("eth dev config error %d", diag);
        return diag;
    }

    for (i = 0; i < NR_QUEUE; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, 64, 0, &tx_conf);
        if (diag) {
            VLOG_ERR("eth dev tx queue setup error %d", diag);
            return diag;
        }
    }

    for (i = 0; i < NR_QUEUE; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, 64, 0, &rx_conf,
                                      dev->dpdk_mp->mp);
        if (diag) {
            VLOG_ERR("eth dev rx queue setup error %d", diag);
            return diag;
        }
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("eth dev start error %d", diag);
        return diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof eth_addr);
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}
static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

    return &netdev->up;
}
static int
netdev_dpdk_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    unsigned int port_no;
    char *cport;
    int err;
    int i;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    cport = netdev_->name + 4; /* Names always start with "dpdk". */

    if (strncmp(netdev_->name, "dpdk", 4)) {
        err = ENODEV;
        goto unlock_dpdk;
    }

    port_no = strtol(cport, NULL, 0); /* string must be null terminated */
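    /* For example, a netdev named "dpdk0" attaches to DPDK physical port 0,
     * "dpdk1" to port 1, and so on. */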
    for (i = 0; i < NR_QUEUE; i++) {
        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
    }

    ovs_mutex_init(&netdev->mutex);

    ovs_mutex_lock(&netdev->mutex);

    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);

    /* TODO: need to discover the device node at run time. */
    netdev->socket_id = SOCKET0;
    netdev->port_id = port_no;

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock_dev;
    }

    err = dpdk_eth_dev_init(netdev);
    if (err) {
        goto unlock_dev;
    }
    netdev_->n_rxq = NR_QUEUE;

    list_push_back(&dpdk_list, &netdev->list_node);

unlock_dev:
    ovs_mutex_unlock(&netdev->mutex);
unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);

    ovs_mutex_destroy(&dev->mutex);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    /* TODO: Allow configuring the number of queues.  For now the TX queue
     * count mirrors the RX queue count. */
    smap_add_format(args, "configured_rx_queues", "%u", netdev_->n_rxq);
    smap_add_format(args, "configured_tx_queues", "%u", netdev_->n_rxq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}
static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}
static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}
static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}
static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}
static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx;

    if (txq->count == 0) {
        return;
    }
    rte_spinlock_lock(&txq->tx_lock);
    nb_tx = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts, txq->count);
    if (nb_tx != txq->count) {
        /* Free any mbufs that the NIC did not accept for transmission. */
        rte_mempool_put_bulk(dev->dpdk_mp->mp,
                             (void **) &txq->burst_pkts[nb_tx],
                             (txq->count - nb_tx));
    }
    txq->count = 0;
    rte_spinlock_unlock(&txq->tx_lock);
}
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct ofpbuf **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* Opportunistically drain any packets batched on this queue's TX side
     * before polling for RX. */
    dpdk_queue_flush(dev, rxq_->queue_id);

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets, MAX_RX_QUEUE_LEN);
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;
    return 0;
}
static inline void
dpdk_queue_pkt(struct netdev_dpdk *dev, int qid,
               struct rte_mbuf *pkt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;
    uint64_t cur_tsc;
    uint32_t nb_tx;

    rte_spinlock_lock(&txq->tx_lock);
    txq->burst_pkts[txq->count++] = pkt;
    if (txq->count == MAX_TX_QUEUE_LEN) {
        goto flush;
    }
    cur_tsc = rte_get_timer_cycles();
    if (txq->count == 1) {
        txq->tsc = cur_tsc;
    }
    diff_tsc = cur_tsc - txq->tsc;
    if (diff_tsc >= DRAIN_TSC) {
        goto flush;
    }
    rte_spinlock_unlock(&txq->tx_lock);
    return;

flush:
    nb_tx = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts, txq->count);
    if (nb_tx != txq->count) {
        /* Free any mbufs that the NIC did not accept for transmission. */
        rte_mempool_put_bulk(dev->dpdk_mp->mp,
                             (void **) &txq->burst_pkts[nb_tx],
                             (txq->count - nb_tx));
    }
    txq->count = 0;
    rte_spinlock_unlock(&txq->tx_lock);
}
/* TX helper for packets that are not backed by DPDK memory: copies the data
 * into a freshly allocated mbuf and queues it for transmission. */
static void
dpdk_do_tx_copy(struct netdev *netdev, char *buf, int size)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *pkt;

    pkt = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
    if (!pkt) {
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped++;
        ovs_mutex_unlock(&dev->mutex);
        return;
    }

    /* We have to do a copy for now. */
    memcpy(pkt->pkt.data, buf, size);

    rte_pktmbuf_data_len(pkt) = size;
    rte_pktmbuf_pkt_len(pkt) = size;

    dpdk_queue_pkt(dev, NON_PMD_THREAD_TX_QUEUE, pkt);
    dpdk_queue_flush(dev, NON_PMD_THREAD_TX_QUEUE);
}
static int
netdev_dpdk_send(struct netdev *netdev,
                 struct ofpbuf *ofpbuf, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ret = 0;

    if (ofpbuf_size(ofpbuf) > dev->max_packet_len) {
        VLOG_WARN_RL(&rl, "Packet size %d exceeds max_packet_len %d",
                     (int) ofpbuf_size(ofpbuf), dev->max_packet_len);

        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped++;
        ovs_mutex_unlock(&dev->mutex);

        ret = E2BIG;
    } else if (!may_steal || ofpbuf->source != OFPBUF_DPDK) {
        /* The buffer is not DPDK memory (or we may not steal it), so copy. */
        dpdk_do_tx_copy(netdev, (char *) ofpbuf_data(ofpbuf),
                        ofpbuf_size(ofpbuf));

        if (may_steal) {
            ofpbuf_delete(ofpbuf);
        }
    } else {
        int qid = rte_lcore_id() % NR_QUEUE;

        dpdk_queue_pkt(dev, qid, (struct rte_mbuf *) ofpbuf);
    }

    return ret;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    /* Get (or create) a mempool sized for the new MTU. */
    mp = dpdk_mp_get(dev->socket_id, mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        /* Restore the old configuration and restart the device. */
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    /* Refresh the cached link state first. */
    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    *stats = dev->stats_offset;

    stats->rx_packets += rte_stats.ipackets;
    stats->tx_packets += rte_stats.opackets;
    stats->rx_bytes += rte_stats.ibytes;
    stats->tx_bytes += rte_stats.obytes;
    stats->rx_errors += rte_stats.ierrors;
    stats->tx_errors += rte_stats.oerrors;
    stats->multicast += rte_stats.imcasts;

    stats->tx_dropped += dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_stats(struct netdev *netdev, const struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dev->stats_offset = *stats;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->flags & NETDEV_UP) {
        err = rte_eth_dev_start(dev->port_id);
        if (err) {
            return err;
        }
    }

    if (dev->flags & NETDEV_PROMISC) {
        rte_eth_promiscuous_enable(dev->port_id);
    }

    if (!(dev->flags & NETDEV_UP)) {
        rte_eth_dev_stop(dev->port_id);
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x",
                    dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x",
                    dev_info.pci_dev->id.device_id);

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }

    unixctl_command_reply(conn, "OK");
}
static int
dpdk_class_init(void)
{
    int result;

    if (rte_eal_init_ret) {
        return 0;
    }

    result = rte_pmd_init_all();
    if (result) {
        VLOG_ERR("Cannot init PMD");
        return result;
    }

    result = rte_eal_pci_probe();
    if (result) {
        VLOG_ERR("Cannot probe PCI");
        return result;
    }

    if (rte_eth_dev_count() < 1) {
        VLOG_ERR("No Ethernet devices found. Try assigning ports to UIO.");
    }

    VLOG_INFO("Ethernet Device Count: %d", (int) rte_eth_dev_count());

    list_init(&dpdk_list);
    list_init(&dpdk_mp_list);
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);
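    /* The command registered above can then be driven from the shell, e.g.:
     *
     *     ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
     *     ovs-appctl netdev-dpdk/set-admin-state up    (all DPDK ports)
     */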
    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);

    return 0;
}
static struct netdev_class netdev_dpdk_class = {
    "dpdk",
    dpdk_class_init,            /* init */
    NULL,                       /* netdev_dpdk_run */
    NULL,                       /* netdev_dpdk_wait */

    netdev_dpdk_alloc,
    netdev_dpdk_construct,
    netdev_dpdk_destruct,
    netdev_dpdk_dealloc,
    netdev_dpdk_get_config,
    NULL,                       /* netdev_dpdk_set_config */
    NULL,                       /* get_tunnel_config */

    netdev_dpdk_send,           /* send */
    NULL,                       /* send_wait */

    netdev_dpdk_set_etheraddr,
    netdev_dpdk_get_etheraddr,
    netdev_dpdk_get_mtu,
    netdev_dpdk_set_mtu,
    netdev_dpdk_get_ifindex,
    netdev_dpdk_get_carrier,
    netdev_dpdk_get_carrier_resets,
    netdev_dpdk_set_miimon,
    netdev_dpdk_get_stats,
    netdev_dpdk_set_stats,
    netdev_dpdk_get_features,
    NULL,                       /* set_advertisements */

    NULL,                       /* set_policing */
    NULL,                       /* get_qos_types */
    NULL,                       /* get_qos_capabilities */
    NULL,                       /* get_qos */
    NULL,                       /* set_qos */
    NULL,                       /* get_queue */
    NULL,                       /* set_queue */
    NULL,                       /* delete_queue */
    NULL,                       /* get_queue_stats */
    NULL,                       /* queue_dump_start */
    NULL,                       /* queue_dump_next */
    NULL,                       /* queue_dump_done */
    NULL,                       /* dump_queue_stats */

    NULL,                       /* get_in4 */
    NULL,                       /* set_in4 */
    NULL,                       /* get_in6 */
    NULL,                       /* add_router */
    NULL,                       /* get_next_hop */
    netdev_dpdk_get_status,
    NULL,                       /* arp_lookup */

    netdev_dpdk_update_flags,

    netdev_dpdk_rxq_alloc,
    netdev_dpdk_rxq_construct,
    netdev_dpdk_rxq_destruct,
    netdev_dpdk_rxq_dealloc,
    netdev_dpdk_rxq_recv,
    NULL,                       /* rxq_wait */
    NULL,                       /* rxq_drain */
};
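/* Consumes the "--dpdk" option and everything after it, handing those
 * arguments to the DPDK EAL.  A typical invocation (illustrative; the exact
 * EAL flags depend on your system and DPDK version) looks like:
 *
 *     ovs-vswitchd --dpdk -c 0x1 -n 4 -- unix:$DB_SOCK --pidfile
 */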
int
dpdk_init(int argc, char **argv)
{
    int result;

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Make sure the program name passed to rte_eal_init() is vswitchd. */
    argv[1] = argv[0];
    argc--;
    argv++;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL\n");
    }
    rte_eal_init_ret = 0;

    return result;
}
void
netdev_dpdk_register(void)
{
    netdev_register_provider(&netdev_dpdk_class);
}
int
pmd_thread_setaffinity_cpu(int cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* Tell DPDK which lcore this thread is running on. */
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}