2 * Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
23 #include "connectivity.h"
26 #include "netdev-provider.h"
27 #include "netdev-vport.h"
29 #include "ofp-print.h"
32 #include "pcap-file.h"
33 #include "poll-loop.h"
38 #include "unaligned.h"
41 #include "reconnect.h"
/* Logging module name for this file. */
44 VLOG_DEFINE_THIS_MODULE(netdev_dummy);
/* NOTE(review): this excerpt elides lines (the embedded original
 * numbering jumps), so the struct bodies below are incomplete; code is
 * kept byte-identical. */
/* One framed packet stream over an OVS 'stream'.  Elsewhere in this
 * file it also carries an 'rxbuf' ofpbuf and a 'txq' list. */
48 struct dummy_packet_stream {
49 struct stream *stream;
/* Which flavor of packet connection a dummy netdev is configured with. */
54 enum dummy_packet_conn_type {
55 NONE, /* No connection is configured. */
56 PASSIVE, /* Listener. */
57 ACTIVE /* Connect to listener. */
/* Passive side: a pstream listener plus its accepted streams
 * (an 'n_streams' count is referenced elsewhere but elided here). */
60 struct dummy_packet_pconn {
61 struct pstream *pstream;
62 struct dummy_packet_stream *streams;
/* Active side: one stream driven by a reconnect FSM. */
66 struct dummy_packet_rconn {
67 struct dummy_packet_stream *rstream;
68 struct reconnect *reconnect;
/* 'type' selects between the two flavors.  Later code accesses these
 * as conn->u.pconn / conn->u.rconn, so they sit inside a union 'u'
 * whose declaration is elided from this excerpt. */
71 struct dummy_packet_conn {
72 enum dummy_packet_conn_type type;
74 struct dummy_packet_pconn pconn;
75 struct dummy_packet_rconn rconn;
79 /* Protects 'dummy_list'. */
80 static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;
82 /* Contains all 'struct dummy_dev's. */
83 static struct list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
84 = LIST_INITIALIZER(&dummy_list);
/* Members of 'struct netdev_dummy'.  The struct header and some fields
 * referenced later (e.g. 'up' and 'mtu') are elided from this excerpt. */
90 struct list list_node OVS_GUARDED_BY(dummy_list_mutex);
92 /* Protects all members below. */
93 struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);
95 uint8_t hwaddr[ETH_ADDR_LEN] OVS_GUARDED;
97 struct netdev_stats stats OVS_GUARDED;
98 enum netdev_flags flags OVS_GUARDED;
99 int ifindex OVS_GUARDED;
101 struct dummy_packet_conn conn OVS_GUARDED;
/* Optional pcap capture files; may alias when a single "pcap" file is
 * configured (see netdev_dummy_set_config()). */
103 FILE *tx_pcap, *rx_pcap OVS_GUARDED;
105 struct list rxes OVS_GUARDED; /* List of child "netdev_rx_dummy"s. */
108 /* Max 'recv_queue_len' in struct netdev_dummy. */
109 #define NETDEV_DUMMY_MAX_QUEUE 100
/* Per-open receive handle; holds packets delivered to the device. */
111 struct netdev_rx_dummy {
113 struct list node; /* In netdev_dummy's "rxes" list. */
114 struct list recv_queue;
115 int recv_queue_len; /* list_size(&recv_queue). */
116 struct seq *seq; /* Reports newly queued packets. */
/* Forward declarations. */
119 static unixctl_cb_func netdev_dummy_set_admin_state;
120 static int netdev_dummy_construct(struct netdev *);
121 static void netdev_dummy_queue_packet(struct netdev_dummy *, struct ofpbuf *);
123 static void dummy_packet_stream_close(struct dummy_packet_stream *);
/* Returns true iff 'class' is the dummy netdev class, identified by
 * its construct hook. */
126 is_dummy_class(const struct netdev_class *class)
128 return class->construct == netdev_dummy_construct;
/* Downcasts 'netdev' to its dummy representation; asserts the class. */
131 static struct netdev_dummy *
132 netdev_dummy_cast(const struct netdev *netdev)
134 ovs_assert(is_dummy_class(netdev_get_class(netdev)));
135 return CONTAINER_OF(netdev, struct netdev_dummy, up);
/* Downcasts 'rx' to its dummy representation; asserts the class. */
138 static struct netdev_rx_dummy *
139 netdev_rx_dummy_cast(const struct netdev_rx *rx)
141 ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
142 return CONTAINER_OF(rx, struct netdev_rx_dummy, up);
/* Initializes 's' to wrap 'stream'.  The rx buffer gets initial room
 * only when a real stream is supplied. */
146 dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
148 int rxbuf_size = stream ? 2048 : 0;
150 ofpbuf_init(&s->rxbuf, rxbuf_size);
/* Heap-allocates (zeroed) and initializes a dummy_packet_stream that
 * takes ownership of 'stream'. */
154 static struct dummy_packet_stream *
155 dummy_packet_stream_create(struct stream *stream)
157 struct dummy_packet_stream *s;
159 s = xzalloc(sizeof *s);
160 dummy_packet_stream_init(s, stream);
/* Registers poll-loop wakeups for 's': send-readiness only while the
 * tx queue is nonempty, receive-readiness always. */
166 dummy_packet_stream_wait(struct dummy_packet_stream *s)
168 stream_run_wait(s->stream);
169 if (!list_is_empty(&s->txq)) {
170 stream_send_wait(s->stream);
172 stream_recv_wait(s->stream);
/* Queues a copy of 'buffer' on 's' prefixed with a 2-byte big-endian
 * length header; the packet is silently dropped when the tx queue is
 * already at NETDEV_DUMMY_MAX_QUEUE. */
176 dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
178 if (list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
181 b = ofpbuf_clone_data_with_headroom(buffer, size, 2);
182 put_unaligned_be16(ofpbuf_push_uninit(b, 2), htons(size));
183 list_push_back(&s->txq, &b->list_node);
/* Drives I/O on 's' for 'dev': flushes the tx queue, then reads the
 * 2-byte length header and frame body, queueing each complete frame
 * onto 'dev'.  NOTE(review): several control-flow lines (braces,
 * declarations, returns) are elided from this excerpt. */
188 dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
193 stream_run(s->stream);
195 if (!list_is_empty(&s->txq)) {
196 struct ofpbuf *txbuf;
199 txbuf = ofpbuf_from_list(list_front(&s->txq));
200 retval = stream_send(s->stream, txbuf->data, txbuf->size);
/* Partial send trims the buffer in place; a fully-sent buffer is
 * unlinked and freed. */
203 ofpbuf_pull(txbuf, retval);
205 list_remove(&txbuf->list_node);
206 ofpbuf_delete(txbuf);
208 } else if (retval != -EAGAIN) {
/* Receive: first complete the 2-byte length header... */
214 if (s->rxbuf.size < 2) {
215 n = 2 - s->rxbuf.size;
/* ...then the frame body.  Runt frames (shorter than an Ethernet
 * header) are rejected. */
219 frame_len = ntohs(get_unaligned_be16(s->rxbuf.data));
220 if (frame_len < ETH_HEADER_LEN) {
224 n = (2 + frame_len) - s->rxbuf.size;
231 ofpbuf_prealloc_tailroom(&s->rxbuf, n);
232 retval = stream_recv(s->stream, ofpbuf_tail(&s->rxbuf), n);
235 s->rxbuf.size += retval;
/* Whole frame present: strip the length header and hand a copy to
 * the device's receive queues. */
236 if (retval == n && s->rxbuf.size > 2) {
237 ofpbuf_pull(&s->rxbuf, 2);
238 netdev_dummy_queue_packet(dev,
239 ofpbuf_clone(&s->rxbuf));
240 ofpbuf_clear(&s->rxbuf);
242 } else if (retval != -EAGAIN) {
/* Short read with data buffered is a protocol error (EPROTO). */
243 error = (retval < 0 ? -retval
244 : s->rxbuf.size ? EPROTO
/* Releases the stream and frees 's''s buffers (not 's' itself). */
253 dummy_packet_stream_close(struct dummy_packet_stream *s)
255 stream_close(s->stream);
256 ofpbuf_uninit(&s->rxbuf);
257 ofpbuf_list_delete(&s->txq);
/* Zeroes 'conn', leaving it in the NONE state. */
261 dummy_packet_conn_init(struct dummy_packet_conn *conn)
263 memset(conn, 0, sizeof *conn);
/* Reports the active configuration into 'args' using the same keys
 * ("pstream"/"stream") consumed by dummy_packet_conn_set_config(). */
268 dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args)
271 switch (conn->type) {
273 smap_add(args, "pstream", pstream_get_name(conn->u.pconn.pstream));
277 smap_add(args, "stream", stream_get_name(conn->u.rconn.rstream->stream));
/* Tears down either flavor of connection and resets 'conn' to the
 * NONE state (final memset). */
287 dummy_packet_conn_close(struct dummy_packet_conn *conn)
290 struct dummy_packet_pconn *pconn = &conn->u.pconn;
291 struct dummy_packet_rconn *rconn = &conn->u.rconn;
293 switch (conn->type) {
/* PASSIVE: close the listener and every accepted stream. */
295 pstream_close(pconn->pstream);
296 for (i = 0; i < pconn->n_streams; i++) {
297 dummy_packet_stream_close(&pconn->streams[i]);
299 free(pconn->streams);
300 pconn->pstream = NULL;
301 pconn->streams = NULL;
/* ACTIVE: close the single stream and destroy the reconnect FSM. */
305 dummy_packet_stream_close(rconn->rstream);
306 free(rconn->rstream);
307 rconn->rstream = NULL;
308 reconnect_destroy(rconn->reconnect);
309 rconn->reconnect = NULL;
318 memset(conn, 0, sizeof *conn);
/* Applies "pstream"/"stream" settings from 'args' to 'conn'.  Rejects
 * both keys at once; no-ops when the configured name is unchanged;
 * otherwise closes the old connection and opens the new one. */
322 dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
323 const struct smap *args)
325 const char *pstream = smap_get(args, "pstream");
326 const char *stream = smap_get(args, "stream");
328 if (pstream && stream) {
329 VLOG_WARN("Open failed: both %s and %s are configured",
/* Same-name check per current type; a changed name tears down the
 * existing connection before reconfiguring. */
334 switch (conn->type) {
336 if (!strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
339 dummy_packet_conn_close(conn);
342 if (!strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
345 dummy_packet_conn_close(conn);
/* Passive listener setup. */
355 error = pstream_open(pstream, &conn->u.pconn.pstream, DSCP_DEFAULT);
357 VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
359 conn->type = PASSIVE;
/* Active connection: driven by a reconnect FSM with 100 ms initial
 * backoff and keepalive probing disabled. */
365 struct stream *active_stream;
366 struct reconnect *reconnect;;
368 reconnect = reconnect_create(time_msec());
369 reconnect_set_name(reconnect, stream);
370 reconnect_set_passive(reconnect, false, time_msec());
371 reconnect_enable(reconnect, time_msec());
372 reconnect_set_backoff(reconnect, 100, INT_MAX);
373 reconnect_set_probe_interval(reconnect, 0);
374 conn->u.rconn.reconnect = reconnect;
/* Kick off the first connect and report its outcome to the FSM.
 * NOTE(review): the stray ';;' above is harmless but worth fixing
 * in the upstream file. */
377 error = stream_open(stream, &active_stream, DSCP_DEFAULT);
378 conn->u.rconn.rstream = dummy_packet_stream_create(active_stream);
382 reconnect_connected(reconnect, time_msec());
386 reconnect_connecting(reconnect, time_msec());
390 reconnect_connect_failed(reconnect, time_msec(), error);
391 stream_close(active_stream);
392 conn->u.rconn.rstream->stream = NULL;
/* Passive-side poll: accepts new streams into 'pconn->streams' and
 * services each accepted stream, compacting the array when one dies. */
399 dummy_pconn_run(struct netdev_dummy *dev)
400 OVS_REQUIRES(dev->mutex)
402 struct stream *new_stream;
403 struct dummy_packet_pconn *pconn = &dev->conn.u.pconn;
407 error = pstream_accept(pconn->pstream, &new_stream);
409 struct dummy_packet_stream *s;
/* Grow the stream array by one and take ownership of 'new_stream'. */
411 pconn->streams = xrealloc(pconn->streams,
412 ((pconn->n_streams + 1)
414 s = &pconn->streams[pconn->n_streams++];
415 dummy_packet_stream_init(s, new_stream);
416 } else if (error != EAGAIN) {
/* Accept failure other than EAGAIN: shut the listener down. */
417 VLOG_WARN("%s: accept failed (%s)",
418 pstream_get_name(pconn->pstream), ovs_strerror(error));
419 pstream_close(pconn->pstream);
420 pconn->pstream = NULL;
421 dev->conn.type = NONE;
/* Service every accepted stream; a failed stream is closed and
 * replaced by the array's last element (swap-with-last removal). */
424 for (i = 0; i < pconn->n_streams; i++) {
425 struct dummy_packet_stream *s = &pconn->streams[i];
427 error = dummy_packet_stream_run(dev, s);
429 VLOG_DBG("%s: closing connection (%s)",
430 stream_get_name(s->stream),
431 ovs_retval_to_string(error));
432 dummy_packet_stream_close(s);
433 pconn->streams[i] = pconn->streams[--pconn->n_streams];
/* Active-side poll: advances the reconnect FSM, (re)opening the stream
 * as directed, and services it while connected.  NOTE(review): the
 * remaining RECONNECT_* case labels and break statements are elided
 * from this excerpt. */
439 dummy_rconn_run(struct netdev_dummy *dev)
440 OVS_REQUIRES(dev->mutex)
442 struct dummy_packet_rconn *rconn = &dev->conn.u.rconn;
444 switch (reconnect_run(rconn->reconnect, time_msec())) {
445 case RECONNECT_CONNECT:
/* Continue an in-progress connect, or start a fresh one. */
449 if (rconn->rstream->stream) {
450 error = stream_connect(rconn->rstream->stream);
452 error = stream_open(reconnect_get_name(rconn->reconnect),
453 &rconn->rstream->stream, DSCP_DEFAULT);
/* Report the connect outcome to the FSM; on hard failure, drop the
 * stream so the next RECONNECT_CONNECT reopens it. */
458 reconnect_connected(rconn->reconnect, time_msec());
462 reconnect_connecting(rconn->reconnect, time_msec());
466 reconnect_connect_failed(rconn->reconnect, time_msec(), error);
467 stream_close(rconn->rstream->stream);
468 rconn->rstream->stream = NULL;
474 case RECONNECT_DISCONNECT:
475 case RECONNECT_PROBE:
/* While connected, pump I/O; on error tell the FSM and drop the
 * stream so it can back off and retry. */
480 if (reconnect_is_connected(rconn->reconnect)) {
483 err = dummy_packet_stream_run(dev, rconn->rstream);
486 reconnect_disconnected(rconn->reconnect, time_msec(), err);
487 stream_close(rconn->rstream->stream);
488 rconn->rstream->stream = NULL;
/* Dispatches per-connection-type servicing for 'dev'. */
494 dummy_packet_conn_run(struct netdev_dummy *dev)
495 OVS_REQUIRES(dev->mutex)
497 switch (dev->conn.type) {
499 dummy_pconn_run(dev);
503 dummy_rconn_run(dev);
/* Registers poll-loop wakeups for whichever connection is active. */
513 dummy_packet_conn_wait(struct dummy_packet_conn *conn)
516 switch (conn->type) {
518 pstream_wait(conn->u.pconn.pstream);
519 for (i = 0; i < conn->u.pconn.n_streams; i++) {
520 struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
521 dummy_packet_stream_wait(s);
525 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
526 dummy_packet_stream_wait(conn->u.rconn.rstream);
/* Fans 'buffer' out: to every accepted stream when PASSIVE, or to the
 * single stream when ACTIVE and connected. */
537 dummy_packet_conn_send(struct dummy_packet_conn *conn,
538 const void *buffer, size_t size)
542 switch (conn->type) {
544 for (i = 0; i < conn->u.pconn.n_streams; i++) {
545 struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
547 dummy_packet_stream_send(s, buffer, size);
548 pstream_wait(conn->u.pconn.pstream);
553 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
554 dummy_packet_stream_send(conn->u.rconn.rstream, buffer, size);
555 dummy_packet_stream_wait(conn->u.rconn.rstream);
/* Global poll hook: services every dummy netdev's packet connection.
 * Takes dummy_list_mutex, then each device's mutex in turn (lock
 * order matches the OVS_ACQ_AFTER annotation on dev->mutex). */
566 netdev_dummy_run(void)
568 struct netdev_dummy *dev;
570 ovs_mutex_lock(&dummy_list_mutex);
571 LIST_FOR_EACH (dev, list_node, &dummy_list) {
572 ovs_mutex_lock(&dev->mutex);
573 dummy_packet_conn_run(dev);
574 ovs_mutex_unlock(&dev->mutex);
576 ovs_mutex_unlock(&dummy_list_mutex);
/* Global wait hook: registers poll-loop wakeups for every device. */
580 netdev_dummy_wait(void)
582 struct netdev_dummy *dev;
584 ovs_mutex_lock(&dummy_list_mutex);
585 LIST_FOR_EACH (dev, list_node, &dummy_list) {
586 ovs_mutex_lock(&dev->mutex);
587 dummy_packet_conn_wait(&dev->conn);
588 ovs_mutex_unlock(&dev->mutex);
590 ovs_mutex_unlock(&dummy_list_mutex);
/* netdev_class->alloc: zeroed allocation of the dummy device. */
593 static struct netdev *
594 netdev_dummy_alloc(void)
596 struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
/* netdev_class->construct: assigns a unique aa:55:xx:xx:xx:xx MAC from
 * a shared atomic counter, initializes per-device state, and links the
 * device onto the global 'dummy_list'. */
601 netdev_dummy_construct(struct netdev *netdev_)
603 static atomic_uint next_n = ATOMIC_VAR_INIT(0xaa550000);
604 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
607 atomic_add(&next_n, 1, &n);
609 ovs_mutex_init(&netdev->mutex);
610 ovs_mutex_lock(&netdev->mutex);
611 netdev->hwaddr[0] = 0xaa;
612 netdev->hwaddr[1] = 0x55;
613 netdev->hwaddr[2] = n >> 24;
614 netdev->hwaddr[3] = n >> 16;
615 netdev->hwaddr[4] = n >> 8;
616 netdev->hwaddr[5] = n;
/* Negative errno convention: "no ifindex configured". */
619 netdev->ifindex = -EOPNOTSUPP;
621 dummy_packet_conn_init(&netdev->conn);
623 list_init(&netdev->rxes);
624 ovs_mutex_unlock(&netdev->mutex);
626 ovs_mutex_lock(&dummy_list_mutex);
627 list_push_back(&dummy_list, &netdev->list_node);
628 ovs_mutex_unlock(&dummy_list_mutex);
/* netdev_class->destruct: unlinks from 'dummy_list', closes the packet
 * connection, and destroys the per-device mutex. */
634 netdev_dummy_destruct(struct netdev *netdev_)
636 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
638 ovs_mutex_lock(&dummy_list_mutex);
639 list_remove(&netdev->list_node);
640 ovs_mutex_unlock(&dummy_list_mutex);
642 ovs_mutex_lock(&netdev->mutex);
643 dummy_packet_conn_close(&netdev->conn);
644 netdev->conn.type = NONE;
646 ovs_mutex_unlock(&netdev->mutex);
647 ovs_mutex_destroy(&netdev->mutex);
/* netdev_class->dealloc (the free() call is elided in this excerpt). */
651 netdev_dummy_dealloc(struct netdev *netdev_)
653 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
/* netdev_class->get_config: reports "ifindex" (only when non-negative)
 * plus the packet-connection settings. */
659 netdev_dummy_get_config(const struct netdev *netdev_, struct smap *args)
661 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
663 ovs_mutex_lock(&netdev->mutex);
665 if (netdev->ifindex >= 0) {
666 smap_add_format(args, "ifindex", "%d", netdev->ifindex);
669 dummy_packet_conn_get_config(&netdev->conn, args);
671 ovs_mutex_unlock(&netdev->mutex);
/* netdev_class->set_config: applies "ifindex", the connection settings,
 * and pcap capture files — either a single shared "pcap" file or
 * separate "rx_pcap"/"tx_pcap" files. */
676 netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args)
678 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
681 ovs_mutex_lock(&netdev->mutex);
682 netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
684 dummy_packet_conn_set_config(&netdev->conn, args);
/* Close previously-open capture files first; tx may alias rx, so
 * close it only when distinct. */
686 if (netdev->rx_pcap) {
687 fclose(netdev->rx_pcap);
689 if (netdev->tx_pcap && netdev->tx_pcap != netdev->rx_pcap) {
690 fclose(netdev->tx_pcap);
692 netdev->rx_pcap = netdev->tx_pcap = NULL;
693 pcap = smap_get(args, "pcap");
695 netdev->rx_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
697 const char *rx_pcap = smap_get(args, "rx_pcap");
698 const char *tx_pcap = smap_get(args, "tx_pcap");
701 netdev->rx_pcap = ovs_pcap_open(rx_pcap, "ab");
704 netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
708 ovs_mutex_unlock(&netdev->mutex);
/* netdev_class->rx_alloc: zeroed allocation. */
713 static struct netdev_rx *
714 netdev_dummy_rx_alloc(void)
716 struct netdev_rx_dummy *rx = xzalloc(sizeof *rx);
/* netdev_class->rx_construct: links 'rx' onto its parent device and
 * sets up the receive queue and change sequence. */
721 netdev_dummy_rx_construct(struct netdev_rx *rx_)
723 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
724 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
726 ovs_mutex_lock(&netdev->mutex);
727 list_push_back(&netdev->rxes, &rx->node);
728 list_init(&rx->recv_queue);
729 rx->recv_queue_len = 0;
730 rx->seq = seq_create();
731 ovs_mutex_unlock(&netdev->mutex);
/* netdev_class->rx_destruct: unlinks 'rx' and frees queued packets;
 * the seq is destroyed outside the device mutex. */
737 netdev_dummy_rx_destruct(struct netdev_rx *rx_)
739 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
740 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
742 ovs_mutex_lock(&netdev->mutex);
743 list_remove(&rx->node);
744 ofpbuf_list_delete(&rx->recv_queue);
745 ovs_mutex_unlock(&netdev->mutex);
746 seq_destroy(rx->seq);
/* netdev_class->rx_dealloc (the free() call is elided in this
 * excerpt). */
750 netdev_dummy_rx_dealloc(struct netdev_rx *rx_)
752 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
/* netdev_class->rx_recv: pops one queued packet, copies it into
 * 'buffer' when there is enough tailroom, and bumps rx statistics.
 * The popped packet is freed either way. */
758 netdev_dummy_rx_recv(struct netdev_rx *rx_, struct ofpbuf *buffer)
760 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
761 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
762 struct ofpbuf *packet;
765 ovs_mutex_lock(&netdev->mutex);
766 if (!list_is_empty(&rx->recv_queue)) {
767 packet = ofpbuf_from_list(list_pop_front(&rx->recv_queue));
768 rx->recv_queue_len--;
772 ovs_mutex_unlock(&netdev->mutex);
778 if (packet->size <= ofpbuf_tailroom(buffer)) {
779 memcpy(buffer->data, packet->data, packet->size);
780 buffer->size += packet->size;
/* Stats are updated under the device mutex, reacquired here. */
783 ovs_mutex_lock(&netdev->mutex);
784 netdev->stats.rx_packets++;
785 netdev->stats.rx_bytes += packet->size;
786 ovs_mutex_unlock(&netdev->mutex);
790 ofpbuf_delete(packet);
/* netdev_class->rx_wait: wakes immediately if packets are already
 * queued, otherwise waits on the rx change sequence. */
796 netdev_dummy_rx_wait(struct netdev_rx *rx_)
798 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
799 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
800 uint64_t seq = seq_read(rx->seq);
802 ovs_mutex_lock(&netdev->mutex);
803 if (!list_is_empty(&rx->recv_queue)) {
804 poll_immediate_wake();
806 seq_wait(rx->seq, seq);
808 ovs_mutex_unlock(&netdev->mutex);
/* netdev_class->rx_drain: discards all queued packets. */
812 netdev_dummy_rx_drain(struct netdev_rx *rx_)
814 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
815 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
817 ovs_mutex_lock(&netdev->mutex);
818 ofpbuf_list_delete(&rx->recv_queue);
819 rx->recv_queue_len = 0;
820 ovs_mutex_unlock(&netdev->mutex);
/* netdev_class->send: validates the frame size against the MTU (with a
 * VLAN-header allowance), updates tx statistics, fans the frame out to
 * the packet connection, and mirrors it to the tx pcap if configured. */
828 netdev_dummy_send(struct netdev *netdev, const void *buffer, size_t size)
830 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
/* Runt frames are rejected up front. */
832 if (size < ETH_HEADER_LEN) {
835 const struct eth_header *eth = buffer;
838 ovs_mutex_lock(&dev->mutex);
839 max_size = dev->mtu + ETH_HEADER_LEN;
840 ovs_mutex_unlock(&dev->mutex);
/* A VLAN-tagged frame may be one VLAN header longer. */
842 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
843 max_size += VLAN_HEADER_LEN;
845 if (size > max_size) {
850 ovs_mutex_lock(&dev->mutex);
851 dev->stats.tx_packets++;
852 dev->stats.tx_bytes += size;
854 dummy_packet_conn_send(&dev->conn, buffer, size);
/* Append to the tx capture file (wrapped in a const ofpbuf). */
857 struct ofpbuf packet;
859 ofpbuf_use_const(&packet, buffer, size);
860 ovs_pcap_write(dev->tx_pcap, &packet);
861 fflush(dev->tx_pcap);
864 ovs_mutex_unlock(&dev->mutex);
/* netdev_class->set_etheraddr: updates the MAC and pokes the global
 * connectivity sequence only when the address actually changed. */
870 netdev_dummy_set_etheraddr(struct netdev *netdev,
871 const uint8_t mac[ETH_ADDR_LEN])
873 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
875 ovs_mutex_lock(&dev->mutex);
876 if (!eth_addr_equals(dev->hwaddr, mac)) {
877 memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
878 seq_change(connectivity_seq_get());
880 ovs_mutex_unlock(&dev->mutex);
/* netdev_class->get_etheraddr: copies out the MAC under the mutex. */
886 netdev_dummy_get_etheraddr(const struct netdev *netdev,
887 uint8_t mac[ETH_ADDR_LEN])
889 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
891 ovs_mutex_lock(&dev->mutex);
892 memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
893 ovs_mutex_unlock(&dev->mutex);
/* netdev_class->get_mtu (the '*mtup = ...' line is elided in this
 * excerpt). */
899 netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
901 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
903 ovs_mutex_lock(&dev->mutex);
905 ovs_mutex_unlock(&dev->mutex);
/* netdev_class->set_mtu (the assignment line is elided in this
 * excerpt). */
911 netdev_dummy_set_mtu(const struct netdev *netdev, int mtu)
913 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
915 ovs_mutex_lock(&dev->mutex);
917 ovs_mutex_unlock(&dev->mutex);
/* netdev_class->get_stats (the copy-out line is elided in this
 * excerpt). */
923 netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
925 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
927 ovs_mutex_lock(&dev->mutex);
929 ovs_mutex_unlock(&dev->mutex);
/* netdev_class->set_stats (the copy-in line is elided in this
 * excerpt). */
935 netdev_dummy_set_stats(struct netdev *netdev, const struct netdev_stats *stats)
937 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
939 ovs_mutex_lock(&dev->mutex);
941 ovs_mutex_unlock(&dev->mutex);
/* netdev_class->get_ifindex: returns the configured ifindex (a
 * negative errno, -EOPNOTSUPP, when unset — see construct/set_config). */
947 netdev_dummy_get_ifindex(const struct netdev *netdev)
949 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
952 ovs_mutex_lock(&dev->mutex);
953 ifindex = dev->ifindex;
954 ovs_mutex_unlock(&dev->mutex);
/* Applies flag changes; only NETDEV_UP and NETDEV_PROMISC are
 * supported.  Pokes the global connectivity sequence on any change.
 * NOTE(review): the 'flags |= on' line is elided in this excerpt. */
960 netdev_dummy_update_flags__(struct netdev_dummy *netdev,
961 enum netdev_flags off, enum netdev_flags on,
962 enum netdev_flags *old_flagsp)
963 OVS_REQUIRES(netdev->mutex)
965 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
969 *old_flagsp = netdev->flags;
971 netdev->flags &= ~off;
972 if (*old_flagsp != netdev->flags) {
973 seq_change(connectivity_seq_get());
/* netdev_class->update_flags: mutex-taking wrapper around the helper. */
980 netdev_dummy_update_flags(struct netdev *netdev_,
981 enum netdev_flags off, enum netdev_flags on,
982 enum netdev_flags *old_flagsp)
984 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
987 ovs_mutex_lock(&netdev->mutex);
988 error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
989 ovs_mutex_unlock(&netdev->mutex);
994 /* Helper functions. */
/* Provider vtable for the dummy netdev.  NULL entries are hooks this
 * provider does not implement. */
996 static const struct netdev_class dummy_class = {
1003 netdev_dummy_construct,
1004 netdev_dummy_destruct,
1005 netdev_dummy_dealloc,
1006 netdev_dummy_get_config,
1007 netdev_dummy_set_config,
1008 NULL, /* get_tunnel_config */
1010 netdev_dummy_send, /* send */
1011 NULL, /* send_wait */
1013 netdev_dummy_set_etheraddr,
1014 netdev_dummy_get_etheraddr,
1015 netdev_dummy_get_mtu,
1016 netdev_dummy_set_mtu,
1017 netdev_dummy_get_ifindex,
1018 NULL, /* get_carrier */
1019 NULL, /* get_carrier_resets */
1020 NULL, /* get_miimon */
1021 netdev_dummy_get_stats,
1022 netdev_dummy_set_stats,
1024 NULL, /* get_features */
1025 NULL, /* set_advertisements */
1027 NULL, /* set_policing */
1028 NULL, /* get_qos_types */
1029 NULL, /* get_qos_capabilities */
1032 NULL, /* get_queue */
1033 NULL, /* set_queue */
1034 NULL, /* delete_queue */
1035 NULL, /* get_queue_stats */
1036 NULL, /* queue_dump_start */
1037 NULL, /* queue_dump_next */
1038 NULL, /* queue_dump_done */
1039 NULL, /* dump_queue_stats */
1044 NULL, /* add_router */
1045 NULL, /* get_next_hop */
1046 NULL, /* get_status */
1047 NULL, /* arp_lookup */
1049 netdev_dummy_update_flags,
1051 netdev_dummy_rx_alloc,
1052 netdev_dummy_rx_construct,
1053 netdev_dummy_rx_destruct,
1054 netdev_dummy_rx_dealloc,
1055 netdev_dummy_rx_recv,
1056 netdev_dummy_rx_wait,
1057 netdev_dummy_rx_drain,
/* Parses 's' as either a hex-dump Ethernet frame or an ODP datapath
 * flow key and returns a freshly composed packet.  NOTE(review): the
 * failure-return lines are elided in this excerpt. */
1060 static struct ofpbuf *
1061 eth_from_packet_or_flow(const char *s)
1063 enum odp_key_fitness fitness;
1064 struct ofpbuf *packet;
1065 struct ofpbuf odp_key;
/* Hex-frame syntax takes precedence; eth_from_hex() returning 0 means
 * it parsed successfully. */
1069 if (!eth_from_hex(s, &packet)) {
1073 /* Convert string to datapath key.
1075 * It would actually be nicer to parse an OpenFlow-like flow key here, but
1076 * the code for that currently calls exit() on parse error. We have to
1077 * settle for parsing a datapath key for now.
1079 ofpbuf_init(&odp_key, 0);
1080 error = odp_flow_from_string(s, NULL, &odp_key, NULL);
1082 ofpbuf_uninit(&odp_key);
1086 /* Convert odp_key to flow. */
1087 fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
1088 if (fitness == ODP_FIT_ERROR) {
1089 ofpbuf_uninit(&odp_key);
/* Synthesize a packet that matches the parsed flow. */
1093 packet = ofpbuf_new(0);
1094 flow_compose(packet, &flow);
1096 ofpbuf_uninit(&odp_key);
/* Appends 'packet' (ownership taken) to 'rx''s queue and signals any
 * waiter via the rx change sequence. */
1101 netdev_dummy_queue_packet__(struct netdev_rx_dummy *rx, struct ofpbuf *packet)
1103 list_push_back(&rx->recv_queue, &packet->list_node);
1104 rx->recv_queue_len++;
1105 seq_change(rx->seq);
/* Delivers 'packet' (ownership taken) to every non-full rx handle on
 * 'dummy', cloning for all but the last recipient; also mirrors it to
 * the rx pcap file when one is configured.  If no rx can take it, the
 * packet is dropped. */
1109 netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct ofpbuf *packet)
1110 OVS_REQUIRES(dummy->mutex)
1112 struct netdev_rx_dummy *rx, *prev;
1114 if (dummy->rx_pcap) {
1115 ovs_pcap_write(dummy->rx_pcap, packet);
1116 fflush(dummy->rx_pcap);
/* 'prev' trails the loop so the final recipient can take the original
 * buffer instead of a clone. */
1119 LIST_FOR_EACH (rx, node, &dummy->rxes) {
1120 if (rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
1122 netdev_dummy_queue_packet__(prev, ofpbuf_clone(packet));
1128 netdev_dummy_queue_packet__(prev, packet);
1130 ofpbuf_delete(packet);
/* unixctl "netdev-dummy/receive": injects one or more packets (hex
 * frame or ODP flow syntax) into the named dummy device. */
1135 netdev_dummy_receive(struct unixctl_conn *conn,
1136 int argc, const char *argv[], void *aux OVS_UNUSED)
1138 struct netdev_dummy *dummy_dev;
1139 struct netdev *netdev;
1142 netdev = netdev_from_name(argv[1]);
1143 if (!netdev || !is_dummy_class(netdev->netdev_class)) {
1144 unixctl_command_reply_error(conn, "no such dummy netdev");
1147 dummy_dev = netdev_dummy_cast(netdev);
1149 for (i = 2; i < argc; i++) {
1150 struct ofpbuf *packet;
1152 packet = eth_from_packet_or_flow(argv[i]);
1154 unixctl_command_reply_error(conn, "bad packet syntax");
1158 ovs_mutex_lock(&dummy_dev->mutex);
1159 netdev_dummy_queue_packet(dummy_dev, packet);
1160 ovs_mutex_unlock(&dummy_dev->mutex);
1163 unixctl_command_reply(conn, NULL);
/* Drop the reference taken by netdev_from_name(). */
1166 netdev_close(netdev);
/* Sets or clears NETDEV_UP on 'dev'; caller holds dev->mutex. */
1170 netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
1171 OVS_REQUIRES(dev->mutex)
1173 enum netdev_flags old_flags;
1176 netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1178 netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
/* unixctl "netdev-dummy/set-admin-state": "up"/"down" for one named
 * dummy device, or for every dummy device when no name is given. */
1183 netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
1184 const char *argv[], void *aux OVS_UNUSED)
/* The state keyword is always the last argument. */
1188 if (!strcasecmp(argv[argc - 1], "up")) {
1190 } else if ( !strcasecmp(argv[argc - 1], "down")) {
1193 unixctl_command_reply_error(conn, "Invalid Admin State");
/* Named-device branch. */
1198 struct netdev *netdev = netdev_from_name(argv[1]);
1199 if (netdev && is_dummy_class(netdev->netdev_class)) {
1200 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1202 ovs_mutex_lock(&dummy_dev->mutex);
1203 netdev_dummy_set_admin_state__(dummy_dev, up);
1204 ovs_mutex_unlock(&dummy_dev->mutex);
1206 netdev_close(netdev);
1208 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1209 netdev_close(netdev);
/* All-devices branch: walk 'dummy_list' under its mutex. */
1213 struct netdev_dummy *netdev;
1215 ovs_mutex_lock(&dummy_list_mutex);
1216 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1217 ovs_mutex_lock(&netdev->mutex);
1218 netdev_dummy_set_admin_state__(netdev, up);
1219 ovs_mutex_unlock(&netdev->mutex);
1221 ovs_mutex_unlock(&dummy_list_mutex);
1223 unixctl_command_reply(conn, "OK");
/* Registers the dummy provider and its unixctl commands.  The
 * enumerate/unregister loop below presumably runs under 'override'
 * (the conditional line is elided in this excerpt — TODO confirm):
 * it replaces each existing provider with a dummy clone of the same
 * type name. */
1227 netdev_dummy_register(bool override)
1229 unixctl_command_register("netdev-dummy/receive", "NAME PACKET|FLOW...",
1230 2, INT_MAX, netdev_dummy_receive, NULL);
1231 unixctl_command_register("netdev-dummy/set-admin-state",
1232 "[netdev] up|down", 1, 2,
1233 netdev_dummy_set_admin_state, NULL);
1240 netdev_enumerate_types(&types);
1241 SSET_FOR_EACH (type, &types) {
/* "patch" devices are never overridden. */
1242 if (!strcmp(type, "patch")) {
1245 if (!netdev_unregister_provider(type)) {
1246 struct netdev_class *class;
/* Clone the dummy vtable under the replaced provider's type name;
 * on registration failure the duplicated name is freed. */
1249 class = xmemdup(&dummy_class, sizeof dummy_class);
1250 class->type = xstrdup(type);
1251 error = netdev_register_provider(class);
1253 VLOG_ERR("%s: failed to register netdev provider (%s)",
1254 type, ovs_strerror(error));
1255 free(CONST_CAST(char *, class->type));
1260 sset_destroy(&types);
/* Always register the plain "dummy" class and the vport tunnel
 * classes. */
1262 netdev_register_provider(&dummy_class);
1264 netdev_vport_tunnel_register();