+ const char *pstream;
+ const char *pcap;
+
+ ovs_mutex_lock(&netdev->mutex);
+ netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
+
+ pstream = smap_get(args, "pstream");
+ if (!pstream
+ || !netdev->pstream
+ || strcmp(pstream_get_name(netdev->pstream), pstream)) {
+ pstream_close(netdev->pstream);
+ netdev->pstream = NULL;
+
+ if (pstream) {
+ int error;
+
+ error = pstream_open(pstream, &netdev->pstream, DSCP_DEFAULT);
+ if (error) {
+ VLOG_WARN("%s: open failed (%s)",
+ pstream, ovs_strerror(error));
+ }
+ }
+ }
+
+ if (netdev->rx_pcap) {
+ fclose(netdev->rx_pcap);
+ }
+ if (netdev->tx_pcap && netdev->tx_pcap != netdev->rx_pcap) {
+ fclose(netdev->tx_pcap);
+ }
+ netdev->rx_pcap = netdev->tx_pcap = NULL;
+ pcap = smap_get(args, "pcap");
+ if (pcap) {
+ netdev->rx_pcap = netdev->tx_pcap = pcap_open(pcap, "ab");
+ } else {
+ const char *rx_pcap = smap_get(args, "rx_pcap");
+ const char *tx_pcap = smap_get(args, "tx_pcap");
+
+ if (rx_pcap) {
+ netdev->rx_pcap = pcap_open(rx_pcap, "ab");
+ }
+ if (tx_pcap) {
+ netdev->tx_pcap = pcap_open(tx_pcap, "ab");
+ }
+ }
+
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return 0;
+}
+
+static struct netdev_rx *
+netdev_dummy_rx_alloc(void)
+{
+    /* Zero-fill the wrapper so all of its bookkeeping starts out empty. */
+    struct netdev_rx_dummy *dummy_rx = xzalloc(sizeof *dummy_rx);
+
+    return &dummy_rx->up;
+}
+
+static int
+netdev_dummy_rx_construct(struct netdev_rx *rx_)
+{
+    struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
+    struct netdev_dummy *dev = netdev_dummy_cast(rx->up.netdev);
+
+    /* Start with an empty receive queue and register this rx with its
+     * device, all under the device mutex. */
+    ovs_mutex_lock(&dev->mutex);
+    list_init(&rx->recv_queue);
+    rx->recv_queue_len = 0;
+    list_push_back(&dev->rxes, &rx->node);
+    ovs_mutex_unlock(&dev->mutex);
+
+    return 0;
+}
+
+static void
+netdev_dummy_rx_destruct(struct netdev_rx *rx_)
+{
+    struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
+    struct netdev_dummy *dev = netdev_dummy_cast(rx->up.netdev);
+
+    /* Discard any packets still queued and unlink this rx from its
+     * device's list, under the device mutex. */
+    ovs_mutex_lock(&dev->mutex);
+    ofpbuf_list_delete(&rx->recv_queue);
+    list_remove(&rx->node);
+    ovs_mutex_unlock(&dev->mutex);
+}
+
+static void
+netdev_dummy_rx_dealloc(struct netdev_rx *rx_)
+{
+    /* Release the storage obtained in netdev_dummy_rx_alloc(). */
+    free(netdev_rx_dummy_cast(rx_));
+}
+
+/* Dequeues one packet from 'rx_' and copies it into 'buffer', which holds
+ * 'size' bytes.  Returns the packet length on success, -EAGAIN if the queue
+ * is empty, or -EMSGSIZE if the packet does not fit in 'buffer' (in which
+ * case the packet is dropped, not requeued). */
+static int
+netdev_dummy_rx_recv(struct netdev_rx *rx_, void *buffer, size_t size)
+{
+    struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
+    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
+    struct ofpbuf *packet;
+    int retval;
+
+    /* Pop the head of the receive queue, if any, under the device mutex. */
+    ovs_mutex_lock(&netdev->mutex);
+    if (!list_is_empty(&rx->recv_queue)) {
+        packet = ofpbuf_from_list(list_pop_front(&rx->recv_queue));
+        rx->recv_queue_len--;
+    } else {
+        packet = NULL;
+    }
+    ovs_mutex_unlock(&netdev->mutex);
+
+    if (!packet) {
+        /* Nothing queued; the caller should wait and retry. */
+        return -EAGAIN;
+    }
+
+    if (packet->size <= size) {
+        memcpy(buffer, packet->data, packet->size);
+        retval = packet->size;
+
+        /* Re-take the mutex only to update the shared stats; the packet
+         * itself is already owned exclusively by this thread. */
+        ovs_mutex_lock(&netdev->mutex);
+        netdev->stats.rx_packets++;
+        netdev->stats.rx_bytes += packet->size;
+        ovs_mutex_unlock(&netdev->mutex);
+    } else {
+        /* Caller's buffer is too small: report it and drop the packet. */
+        retval = -EMSGSIZE;
+    }
+    ofpbuf_delete(packet);
+
+    return retval;
+}
+
+static void
+netdev_dummy_rx_wait(struct netdev_rx *rx_)
+{
+    struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
+    struct netdev_dummy *dev = netdev_dummy_cast(rx->up.netdev);
+
+    /* If a packet is already queued, arrange to wake up immediately so
+     * that the caller's next recv attempt sees it. */
+    ovs_mutex_lock(&dev->mutex);
+    if (list_is_empty(&rx->recv_queue)) {
+        /* Queue is empty: nothing to wake up for. */
+    } else {
+        poll_immediate_wake();
+    }
+    ovs_mutex_unlock(&dev->mutex);
+}
+
+static int
+netdev_dummy_rx_drain(struct netdev_rx *rx_)
+{
+    struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
+    struct netdev_dummy *dev = netdev_dummy_cast(rx->up.netdev);
+
+    /* Throw away every queued packet and reset the length counter. */
+    ovs_mutex_lock(&dev->mutex);
+    rx->recv_queue_len = 0;
+    ofpbuf_list_delete(&rx->recv_queue);
+    ovs_mutex_unlock(&dev->mutex);
+
+    return 0;
+}
+
+static int
+netdev_dummy_send(struct netdev *netdev, const void *buffer, size_t size)
+{
+ struct netdev_dummy *dev = netdev_dummy_cast(netdev);
+ size_t i;
+
+ if (size < ETH_HEADER_LEN) {
+ return EMSGSIZE;
+ } else {
+ const struct eth_header *eth = buffer;
+ int max_size;
+
+ ovs_mutex_lock(&dev->mutex);
+ max_size = dev->mtu + ETH_HEADER_LEN;
+ ovs_mutex_unlock(&dev->mutex);
+
+ if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
+ max_size += VLAN_HEADER_LEN;
+ }
+ if (size > max_size) {
+ return EMSGSIZE;
+ }
+ }
+
+ ovs_mutex_lock(&dev->mutex);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += size;
+
+ if (dev->tx_pcap) {
+ struct ofpbuf packet;
+
+ ofpbuf_use_const(&packet, buffer, size);
+ pcap_write(dev->tx_pcap, &packet);
+ fflush(dev->tx_pcap);
+ }
+
+ for (i = 0; i < dev->n_streams; i++) {
+ struct dummy_stream *s = &dev->streams[i];
+
+ if (list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
+ struct ofpbuf *b;
+
+ b = ofpbuf_clone_data_with_headroom(buffer, size, 2);
+ put_unaligned_be16(ofpbuf_push_uninit(b, 2), htons(size));
+ list_push_back(&s->txq, &b->list_node);
+ }
+ }
+ ovs_mutex_unlock(&dev->mutex);
+
+ return 0;