2 * Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
23 #include "connectivity.h"
26 #include "netdev-provider.h"
27 #include "netdev-vport.h"
29 #include "ofp-print.h"
32 #include "pcap-file.h"
33 #include "poll-loop.h"
38 #include "unaligned.h"
/* Logging module for this translation unit. */
43 VLOG_DEFINE_THIS_MODULE(netdev_dummy);
/* NOTE(review): this member belongs to a struct whose opening lines are not
 * visible in this excerpt -- presumably "struct dummy_stream", given the
 * dummy_stream_close() definition below; confirm against the full file. */
46 struct stream *stream;
51 /* Protects 'dummy_list'. */
52 static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;
54 /* Contains all 'struct dummy_dev's. */
55 static struct list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
56 = LIST_INITIALIZER(&dummy_list);
/* NOTE(review): the lines below are members of a struct whose opening line is
 * outside the visible excerpt (presumably "struct netdev_dummy", given the
 * CONTAINER_OF in netdev_dummy_cast()); some member lines are missing. */
62 struct list list_node OVS_GUARDED_BY(dummy_list_mutex);
64 /* Protects all members below. */
65 struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);
67 uint8_t hwaddr[ETH_ADDR_LEN] OVS_GUARDED; /* Ethernet address. */
69 struct netdev_stats stats OVS_GUARDED; /* rx/tx counters, see rx_recv/send. */
70 enum netdev_flags flags OVS_GUARDED;
71 int ifindex OVS_GUARDED; /* Negative errno when unset (see construct). */
/* Emulated "wire": optional listening pstream plus accepted streams. */
73 struct pstream *pstream OVS_GUARDED;
74 struct dummy_stream *streams OVS_GUARDED;
75 size_t n_streams OVS_GUARDED;
/* Optional pcap files that record transmitted and received traffic. */
77 FILE *tx_pcap, *rx_pcap OVS_GUARDED;
79 struct list rxes OVS_GUARDED; /* List of child "netdev_rx_dummy"s. */
82 /* Max 'recv_queue_len' in struct netdev_dummy. */
83 #define NETDEV_DUMMY_MAX_QUEUE 100
/* Per-open receive handle on a dummy netdev.  Queued packets wait in
 * 'recv_queue' until netdev_dummy_rx_recv() pops them.  NOTE(review): the
 * closing brace and any further members are not visible in this excerpt. */
85 struct netdev_rx_dummy {
87 struct list node; /* In netdev_dummy's "rxes" list. */
88 struct list recv_queue;
89 int recv_queue_len; /* list_size(&recv_queue). */
90 struct seq *seq; /* Reports newly queued packets. */
/* Forward declarations for functions referenced before their definitions. */
93 static unixctl_cb_func netdev_dummy_set_admin_state;
94 static int netdev_dummy_construct(struct netdev *);
95 static void netdev_dummy_queue_packet(struct netdev_dummy *, struct ofpbuf *);
97 static void dummy_stream_close(struct dummy_stream *);
/* Returns true if 'class' is the dummy netdev class, identified by comparing
 * its construct hook.  (Return-type line is missing from this excerpt;
 * presumably "static bool".) */
100 is_dummy_class(const struct netdev_class *class)
102 return class->construct == netdev_dummy_construct;
/* Downcasts 'netdev' to its containing struct netdev_dummy, asserting that
 * the device really belongs to the dummy class. */
105 static struct netdev_dummy *
106 netdev_dummy_cast(const struct netdev *netdev)
108 ovs_assert(is_dummy_class(netdev_get_class(netdev)));
109 return CONTAINER_OF(netdev, struct netdev_dummy, up);
/* Downcasts 'rx' to its containing struct netdev_rx_dummy, asserting that
 * the underlying netdev belongs to the dummy class. */
112 static struct netdev_rx_dummy *
113 netdev_rx_dummy_cast(const struct netdev_rx *rx)
115 ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
116 return CONTAINER_OF(rx, struct netdev_rx_dummy, up);
/* Periodic work for all dummy netdevs: accepts new connections on each
 * device's pstream, flushes each stream's tx queue, and reads length-prefixed
 * (2-byte big-endian) frames from each stream into the device's rx path.
 * NOTE(review): many interior lines (braces, else arms, declarations of
 * 'error', 'i', 'retval', 'n', 'frame_len') are missing from this excerpt. */
120 netdev_dummy_run(void)
122 struct netdev_dummy *dev;
124 ovs_mutex_lock(&dummy_list_mutex);
125 LIST_FOR_EACH (dev, list_node, &dummy_list) {
128 ovs_mutex_lock(&dev->mutex);
/* Accept a new client stream, growing the 'streams' array by one. */
131 struct stream *new_stream;
134 error = pstream_accept(dev->pstream, &new_stream);
136 struct dummy_stream *s;
138 dev->streams = xrealloc(dev->streams,
139 ((dev->n_streams + 1)
140 * sizeof *dev->streams));
141 s = &dev->streams[dev->n_streams++];
142 s->stream = new_stream;
143 ofpbuf_init(&s->rxbuf, 2048);
145 } else if (error != EAGAIN) {
146 VLOG_WARN("%s: accept failed (%s)",
147 pstream_get_name(dev->pstream), ovs_strerror(error));
148 pstream_close(dev->pstream);
/* Service each accepted stream. */
153 for (i = 0; i < dev->n_streams; i++) {
154 struct dummy_stream *s = &dev->streams[i];
158 stream_run(s->stream);
/* Drain pending tx buffers, one at a time. */
160 if (!list_is_empty(&s->txq)) {
161 struct ofpbuf *txbuf;
164 txbuf = ofpbuf_from_list(list_front(&s->txq));
165 retval = stream_send(s->stream, txbuf->data, txbuf->size);
167 ofpbuf_pull(txbuf, retval);
169 list_remove(&txbuf->list_node);
170 ofpbuf_delete(txbuf);
172 } else if (retval != -EAGAIN) {
/* Receive: first the 2-byte length prefix, then the frame body. */
178 if (s->rxbuf.size < 2) {
179 n = 2 - s->rxbuf.size;
183 frame_len = ntohs(get_unaligned_be16(s->rxbuf.data));
184 if (frame_len < ETH_HEADER_LEN) {
188 n = (2 + frame_len) - s->rxbuf.size;
195 ofpbuf_prealloc_tailroom(&s->rxbuf, n);
196 retval = stream_recv(s->stream, ofpbuf_tail(&s->rxbuf), n);
198 s->rxbuf.size += retval;
/* Whole frame received: strip the prefix and queue a copy. */
199 if (retval == n && s->rxbuf.size > 2) {
200 ofpbuf_pull(&s->rxbuf, 2);
201 netdev_dummy_queue_packet(dev,
202 ofpbuf_clone(&s->rxbuf));
203 ofpbuf_clear(&s->rxbuf);
205 } else if (retval != -EAGAIN) {
206 error = (retval < 0 ? -retval
207 : s->rxbuf.size ? EPROTO
/* On any error, close the stream and fill its slot with the last one. */
213 VLOG_DBG("%s: closing connection (%s)",
214 stream_get_name(s->stream),
215 ovs_retval_to_string(error));
216 dummy_stream_close(&dev->streams[i]);
217 dev->streams[i] = dev->streams[--dev->n_streams];
221 ovs_mutex_unlock(&dev->mutex);
223 ovs_mutex_unlock(&dummy_list_mutex);
/* Releases all resources owned by 's': the stream itself, its rx buffer, and
 * any queued tx buffers.  Does not free 's', which lives in an array. */
227 dummy_stream_close(struct dummy_stream *s)
229 stream_close(s->stream);
230 ofpbuf_uninit(&s->rxbuf);
231 ofpbuf_list_delete(&s->txq);
/* Arranges for the poll loop to wake when netdev_dummy_run() has work to do:
 * a pending accept on any pstream, or stream activity on any connection.
 * Mirrors the structure of netdev_dummy_run() above. */
235 netdev_dummy_wait(void)
237 struct netdev_dummy *dev;
239 ovs_mutex_lock(&dummy_list_mutex);
240 LIST_FOR_EACH (dev, list_node, &dummy_list) {
243 ovs_mutex_lock(&dev->mutex);
245 pstream_wait(dev->pstream);
247 for (i = 0; i < dev->n_streams; i++) {
248 struct dummy_stream *s = &dev->streams[i];
250 stream_run_wait(s->stream);
/* Only wait for writability when there is something queued to send. */
251 if (!list_is_empty(&s->txq)) {
252 stream_send_wait(s->stream);
254 stream_recv_wait(s->stream);
256 ovs_mutex_unlock(&dev->mutex);
258 ovs_mutex_unlock(&dummy_list_mutex);
/* netdev 'alloc' hook: zero-allocates a struct netdev_dummy.
 * NOTE(review): the "return &netdev->up;" line is missing from this
 * excerpt -- confirm against the full file. */
261 static struct netdev *
262 netdev_dummy_alloc(void)
264 struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
/* netdev 'construct' hook: initializes the device with a unique aa:55:xx
 * Ethernet address drawn from an atomic counter, empty stream state, and an
 * empty rx list, then registers it on the global dummy_list. */
269 netdev_dummy_construct(struct netdev *netdev_)
271 static atomic_uint next_n = ATOMIC_VAR_INIT(0xaa550000);
272 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
275 atomic_add(&next_n, 1, &n);
277 ovs_mutex_init(&netdev->mutex);
278 ovs_mutex_lock(&netdev->mutex);
/* Spread the counter 'n' across the low four bytes of the MAC. */
279 netdev->hwaddr[0] = 0xaa;
280 netdev->hwaddr[1] = 0x55;
281 netdev->hwaddr[2] = n >> 24;
282 netdev->hwaddr[3] = n >> 16;
283 netdev->hwaddr[4] = n >> 8;
284 netdev->hwaddr[5] = n;
/* Negative errno marks "no ifindex configured" (see get_ifindex). */
287 netdev->ifindex = -EOPNOTSUPP;
289 netdev->pstream = NULL;
290 netdev->streams = NULL;
291 netdev->n_streams = 0;
293 list_init(&netdev->rxes);
294 ovs_mutex_unlock(&netdev->mutex);
296 ovs_mutex_lock(&dummy_list_mutex);
297 list_push_back(&dummy_list, &netdev->list_node);
298 ovs_mutex_unlock(&dummy_list_mutex);
/* netdev 'destruct' hook: unlinks the device from dummy_list, closes its
 * pstream and every accepted stream, and tears down its mutex. */
304 netdev_dummy_destruct(struct netdev *netdev_)
306 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
309 ovs_mutex_lock(&dummy_list_mutex);
310 list_remove(&netdev->list_node);
311 ovs_mutex_unlock(&dummy_list_mutex);
313 ovs_mutex_lock(&netdev->mutex);
314 pstream_close(netdev->pstream);
315 for (i = 0; i < netdev->n_streams; i++) {
316 dummy_stream_close(&netdev->streams[i]);
318 free(netdev->streams);
319 ovs_mutex_unlock(&netdev->mutex);
320 ovs_mutex_destroy(&netdev->mutex);
/* netdev 'dealloc' hook: frees the storage allocated by netdev_dummy_alloc().
 * NOTE(review): the "free(netdev);" line is missing from this excerpt. */
324 netdev_dummy_dealloc(struct netdev *netdev_)
326 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
/* netdev 'get_config' hook: reports the configured 'ifindex' (only when
 * non-negative, i.e. actually set) and 'pstream' (when open) into 'args'. */
332 netdev_dummy_get_config(const struct netdev *netdev_, struct smap *args)
334 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
336 ovs_mutex_lock(&netdev->mutex);
338 if (netdev->ifindex >= 0) {
339 smap_add_format(args, "ifindex", "%d", netdev->ifindex);
342 if (netdev->pstream) {
343 smap_add(args, "pstream", pstream_get_name(netdev->pstream));
346 ovs_mutex_unlock(&netdev->mutex);
/* netdev 'set_config' hook: applies 'ifindex', 'pstream', and pcap settings
 * from 'args'.  The pstream is reopened only when its name changed; pcap
 * files are always closed and reopened.  'pcap' sets a single file for both
 * directions; 'rx_pcap'/'tx_pcap' set them individually. */
351 netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args)
353 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
357 ovs_mutex_lock(&netdev->mutex);
358 netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
360 pstream = smap_get(args, "pstream");
/* Close the old pstream if the requested name differs (or was removed). */
363 || strcmp(pstream_get_name(netdev->pstream), pstream)) {
364 pstream_close(netdev->pstream);
365 netdev->pstream = NULL;
370 error = pstream_open(pstream, &netdev->pstream, DSCP_DEFAULT);
372 VLOG_WARN("%s: open failed (%s)",
373 pstream, ovs_strerror(error));
/* Close existing pcap files; guard against double-fclose when both
 * directions share one FILE. */
378 if (netdev->rx_pcap) {
379 fclose(netdev->rx_pcap);
381 if (netdev->tx_pcap && netdev->tx_pcap != netdev->rx_pcap) {
382 fclose(netdev->tx_pcap);
384 netdev->rx_pcap = netdev->tx_pcap = NULL;
385 pcap = smap_get(args, "pcap");
387 netdev->rx_pcap = netdev->tx_pcap = pcap_open(pcap, "ab");
389 const char *rx_pcap = smap_get(args, "rx_pcap");
390 const char *tx_pcap = smap_get(args, "tx_pcap");
393 netdev->rx_pcap = pcap_open(rx_pcap, "ab");
396 netdev->tx_pcap = pcap_open(tx_pcap, "ab");
400 ovs_mutex_unlock(&netdev->mutex);
/* rx 'alloc' hook: zero-allocates a struct netdev_rx_dummy.
 * NOTE(review): the "return &rx->up;" line is missing from this excerpt. */
405 static struct netdev_rx *
406 netdev_dummy_rx_alloc(void)
408 struct netdev_rx_dummy *rx = xzalloc(sizeof *rx);
/* rx 'construct' hook: links the new rx onto its netdev's 'rxes' list and
 * initializes its empty receive queue and wakeup seq. */
413 netdev_dummy_rx_construct(struct netdev_rx *rx_)
415 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
416 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
418 ovs_mutex_lock(&netdev->mutex);
419 list_push_back(&netdev->rxes, &rx->node);
420 list_init(&rx->recv_queue);
421 rx->recv_queue_len = 0;
422 rx->seq = seq_create();
423 ovs_mutex_unlock(&netdev->mutex);
/* rx 'destruct' hook: unlinks the rx from its netdev, frees any packets
 * still queued, and destroys the wakeup seq (outside the mutex). */
429 netdev_dummy_rx_destruct(struct netdev_rx *rx_)
431 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
432 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
434 ovs_mutex_lock(&netdev->mutex);
435 list_remove(&rx->node);
436 ofpbuf_list_delete(&rx->recv_queue);
437 ovs_mutex_unlock(&netdev->mutex);
438 seq_destroy(rx->seq);
/* rx 'dealloc' hook: frees the storage from netdev_dummy_rx_alloc().
 * NOTE(review): the "free(rx);" line is missing from this excerpt. */
442 netdev_dummy_rx_dealloc(struct netdev_rx *rx_)
444 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
/* rx 'recv' hook: pops the next queued packet, copies it into 'buffer' if it
 * fits in the tailroom, and bumps the device rx counters.  NOTE(review): the
 * empty-queue early return, the too-big-packet branch, and the final return
 * lines are missing from this excerpt. */
450 netdev_dummy_rx_recv(struct netdev_rx *rx_, struct ofpbuf *buffer)
452 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
453 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
454 struct ofpbuf *packet;
457 ovs_mutex_lock(&netdev->mutex);
458 if (!list_is_empty(&rx->recv_queue)) {
459 packet = ofpbuf_from_list(list_pop_front(&rx->recv_queue));
460 rx->recv_queue_len--;
464 ovs_mutex_unlock(&netdev->mutex);
/* Copy out only when the caller's buffer has room for the whole frame. */
470 if (packet->size <= ofpbuf_tailroom(buffer)) {
471 memcpy(buffer->data, packet->data, packet->size);
472 buffer->size += packet->size;
475 ovs_mutex_lock(&netdev->mutex);
476 netdev->stats.rx_packets++;
477 netdev->stats.rx_bytes += packet->size;
478 ovs_mutex_unlock(&netdev->mutex);
/* The queued copy is consumed either way. */
482 ofpbuf_delete(packet);
/* rx 'wait' hook: wakes the poll loop immediately if packets are already
 * queued, otherwise waits on 'rx->seq', which changes when a packet is
 * queued (see netdev_dummy_queue_packet__).  'seq' is read before taking the
 * mutex so a concurrent change is not missed. */
488 netdev_dummy_rx_wait(struct netdev_rx *rx_)
490 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
491 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
492 uint64_t seq = seq_read(rx->seq);
494 ovs_mutex_lock(&netdev->mutex);
495 if (!list_is_empty(&rx->recv_queue)) {
496 poll_immediate_wake();
498 seq_wait(rx->seq, seq);
500 ovs_mutex_unlock(&netdev->mutex);
/* rx 'drain' hook: discards all packets waiting in the receive queue and
 * resets the queue length counter. */
504 netdev_dummy_rx_drain(struct netdev_rx *rx_)
506 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
507 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
509 ovs_mutex_lock(&netdev->mutex);
510 ofpbuf_list_delete(&rx->recv_queue);
511 rx->recv_queue_len = 0;
512 ovs_mutex_unlock(&netdev->mutex);
/* netdev 'send' hook: validates the frame size against the header length and
 * MTU (+ VLAN header when tagged), updates tx counters, records the frame to
 * the tx pcap if configured, and enqueues a 2-byte-length-prefixed copy on
 * every connected stream whose queue is not full.  NOTE(review): the error
 * returns for the size checks and several declarations are missing from this
 * excerpt. */
520 netdev_dummy_send(struct netdev *netdev, const void *buffer, size_t size)
522 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
525 if (size < ETH_HEADER_LEN) {
528 const struct eth_header *eth = buffer;
531 ovs_mutex_lock(&dev->mutex);
532 max_size = dev->mtu + ETH_HEADER_LEN;
533 ovs_mutex_unlock(&dev->mutex);
/* A VLAN tag is allowed on top of the MTU-based limit. */
535 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
536 max_size += VLAN_HEADER_LEN;
538 if (size > max_size) {
543 ovs_mutex_lock(&dev->mutex);
544 dev->stats.tx_packets++;
545 dev->stats.tx_bytes += size;
548 struct ofpbuf packet;
550 ofpbuf_use_const(&packet, buffer, size);
551 pcap_write(dev->tx_pcap, &packet);
552 fflush(dev->tx_pcap);
/* Fan the frame out to every connected stream; drop on full queues. */
555 for (i = 0; i < dev->n_streams; i++) {
556 struct dummy_stream *s = &dev->streams[i];
558 if (list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
561 b = ofpbuf_clone_data_with_headroom(buffer, size, 2);
562 put_unaligned_be16(ofpbuf_push_uninit(b, 2), htons(size));
563 list_push_back(&s->txq, &b->list_node);
566 ovs_mutex_unlock(&dev->mutex);
/* netdev 'set_etheraddr' hook: stores 'mac' as the device's Ethernet address
 * and bumps the global connectivity seq, but only on an actual change. */
572 netdev_dummy_set_etheraddr(struct netdev *netdev,
573 const uint8_t mac[ETH_ADDR_LEN])
575 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
577 ovs_mutex_lock(&dev->mutex);
578 if (!eth_addr_equals(dev->hwaddr, mac)) {
579 memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
580 seq_change(connectivity_seq_get());
582 ovs_mutex_unlock(&dev->mutex);
/* netdev 'get_etheraddr' hook: copies the device's Ethernet address into
 * 'mac' under the device mutex. */
588 netdev_dummy_get_etheraddr(const struct netdev *netdev,
589 uint8_t mac[ETH_ADDR_LEN])
591 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
593 ovs_mutex_lock(&dev->mutex);
594 memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
595 ovs_mutex_unlock(&dev->mutex);
/* netdev 'get_mtu' hook.  NOTE(review): the line that actually assigns
 * '*mtup' is missing from this excerpt. */
601 netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
603 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
605 ovs_mutex_lock(&dev->mutex);
607 ovs_mutex_unlock(&dev->mutex);
/* netdev 'set_mtu' hook.  NOTE(review): the line that stores 'mtu' into the
 * device is missing from this excerpt. */
613 netdev_dummy_set_mtu(const struct netdev *netdev, int mtu)
615 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
617 ovs_mutex_lock(&dev->mutex);
619 ovs_mutex_unlock(&dev->mutex);
/* netdev 'get_stats' hook.  NOTE(review): the line that copies 'dev->stats'
 * into '*stats' is missing from this excerpt. */
625 netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
627 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
629 ovs_mutex_lock(&dev->mutex);
631 ovs_mutex_unlock(&dev->mutex);
/* netdev 'set_stats' hook.  NOTE(review): the line that copies '*stats' into
 * 'dev->stats' is missing from this excerpt. */
637 netdev_dummy_set_stats(struct netdev *netdev, const struct netdev_stats *stats)
639 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
641 ovs_mutex_lock(&dev->mutex);
643 ovs_mutex_unlock(&dev->mutex);
/* netdev 'get_ifindex' hook: returns the configured ifindex, which is a
 * negative errno (-EOPNOTSUPP) when unset -- see netdev_dummy_construct(). */
649 netdev_dummy_get_ifindex(const struct netdev *netdev)
651 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
654 ovs_mutex_lock(&dev->mutex);
655 ifindex = dev->ifindex;
656 ovs_mutex_unlock(&dev->mutex);
/* Flag-update helper, called with the device mutex held: rejects any flag
 * other than NETDEV_UP and NETDEV_PROMISC, reports the old flags through
 * '*old_flagsp', applies the off/on masks, and bumps the connectivity seq on
 * an actual change.  NOTE(review): the "flags |= on" line and the returns
 * are missing from this excerpt. */
662 netdev_dummy_update_flags__(struct netdev_dummy *netdev,
663 enum netdev_flags off, enum netdev_flags on,
664 enum netdev_flags *old_flagsp)
665 OVS_REQUIRES(netdev->mutex)
667 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
671 *old_flagsp = netdev->flags;
673 netdev->flags &= ~off;
674 if (*old_flagsp != netdev->flags) {
675 seq_change(connectivity_seq_get());
/* netdev 'update_flags' hook: locking wrapper around
 * netdev_dummy_update_flags__(). */
682 netdev_dummy_update_flags(struct netdev *netdev_,
683 enum netdev_flags off, enum netdev_flags on,
684 enum netdev_flags *old_flagsp)
686 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
689 ovs_mutex_lock(&netdev->mutex);
690 error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
691 ovs_mutex_unlock(&netdev->mutex);
696 /* Helper functions. */
/* The dummy netdev provider vtable.  Entries are positional in struct
 * netdev_class; NULL marks unimplemented optional hooks.  NOTE(review):
 * several entries (e.g. alloc, run/wait, get_in4/in6) fall on lines missing
 * from this excerpt. */
698 static const struct netdev_class dummy_class = {
705 netdev_dummy_construct,
706 netdev_dummy_destruct,
707 netdev_dummy_dealloc,
708 netdev_dummy_get_config,
709 netdev_dummy_set_config,
710 NULL, /* get_tunnel_config */
712 netdev_dummy_send, /* send */
713 NULL, /* send_wait */
715 netdev_dummy_set_etheraddr,
716 netdev_dummy_get_etheraddr,
717 netdev_dummy_get_mtu,
718 netdev_dummy_set_mtu,
719 netdev_dummy_get_ifindex,
720 NULL, /* get_carrier */
721 NULL, /* get_carrier_resets */
722 NULL, /* get_miimon */
723 netdev_dummy_get_stats,
724 netdev_dummy_set_stats,
726 NULL, /* get_features */
727 NULL, /* set_advertisements */
729 NULL, /* set_policing */
730 NULL, /* get_qos_types */
731 NULL, /* get_qos_capabilities */
734 NULL, /* get_queue */
735 NULL, /* set_queue */
736 NULL, /* delete_queue */
737 NULL, /* get_queue_stats */
738 NULL, /* queue_dump_start */
739 NULL, /* queue_dump_next */
740 NULL, /* queue_dump_done */
741 NULL, /* dump_queue_stats */
746 NULL, /* add_router */
747 NULL, /* get_next_hop */
748 NULL, /* get_status */
749 NULL, /* arp_lookup */
751 netdev_dummy_update_flags,
753 netdev_dummy_rx_alloc,
754 netdev_dummy_rx_construct,
755 netdev_dummy_rx_destruct,
756 netdev_dummy_rx_dealloc,
757 netdev_dummy_rx_recv,
758 netdev_dummy_rx_wait,
759 netdev_dummy_rx_drain,
/* Parses 's' either as a hex-dump of an Ethernet frame or, failing that, as
 * a datapath flow key from which a packet is composed.  Returns a freshly
 * allocated packet that the caller owns, or (per the missing error-return
 * lines, not visible here) presumably NULL on parse failure. */
762 static struct ofpbuf *
763 eth_from_packet_or_flow(const char *s)
765 enum odp_key_fitness fitness;
766 struct ofpbuf *packet;
767 struct ofpbuf odp_key;
771 if (!eth_from_hex(s, &packet)) {
775 /* Convert string to datapath key.
777 * It would actually be nicer to parse an OpenFlow-like flow key here, but
778 * the code for that currently calls exit() on parse error. We have to
779 * settle for parsing a datapath key for now.
781 ofpbuf_init(&odp_key, 0);
782 error = odp_flow_from_string(s, NULL, &odp_key, NULL);
784 ofpbuf_uninit(&odp_key);
788 /* Convert odp_key to flow. */
789 fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
790 if (fitness == ODP_FIT_ERROR) {
791 ofpbuf_uninit(&odp_key);
/* Synthesize a packet that matches the parsed flow. */
795 packet = ofpbuf_new(0);
796 flow_compose(packet, &flow);
798 ofpbuf_uninit(&odp_key);
/* Appends 'packet' (ownership transferred) to 'rx''s receive queue.
 * NOTE(review): the seq_change(rx->seq) wakeup line appears to fall on a
 * missing line -- confirm against the full file. */
803 netdev_dummy_queue_packet__(struct netdev_rx_dummy *rx, struct ofpbuf *packet)
805 list_push_back(&rx->recv_queue, &packet->list_node);
806 rx->recv_queue_len++;
/* Delivers 'packet' to every rx open on 'dummy' whose queue is not full,
 * cloning for all but the last recipient; records it to the rx pcap first.
 * Takes ownership of 'packet': it is either handed to the last rx or freed
 * when no rx accepted it.  Called with 'dummy->mutex' held. */
811 netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct ofpbuf *packet)
812 OVS_REQUIRES(dummy->mutex)
814 struct netdev_rx_dummy *rx, *prev;
816 if (dummy->rx_pcap) {
817 pcap_write(dummy->rx_pcap, packet);
818 fflush(dummy->rx_pcap);
/* Clone for each earlier recipient; the last one gets 'packet' itself. */
821 LIST_FOR_EACH (rx, node, &dummy->rxes) {
822 if (rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
824 netdev_dummy_queue_packet__(prev, ofpbuf_clone(packet));
830 netdev_dummy_queue_packet__(prev, packet);
832 ofpbuf_delete(packet);
/* unixctl "netdev-dummy/receive" handler: looks up the named dummy netdev
 * and injects each remaining argument as a received packet (hex frame or
 * flow syntax -- see eth_from_packet_or_flow()).  Replies with an error if
 * the netdev is unknown or a packet fails to parse. */
837 netdev_dummy_receive(struct unixctl_conn *conn,
838 int argc, const char *argv[], void *aux OVS_UNUSED)
840 struct netdev_dummy *dummy_dev;
841 struct netdev *netdev;
844 netdev = netdev_from_name(argv[1]);
845 if (!netdev || !is_dummy_class(netdev->netdev_class)) {
846 unixctl_command_reply_error(conn, "no such dummy netdev");
849 dummy_dev = netdev_dummy_cast(netdev);
851 for (i = 2; i < argc; i++) {
852 struct ofpbuf *packet;
854 packet = eth_from_packet_or_flow(argv[i]);
856 unixctl_command_reply_error(conn, "bad packet syntax");
860 ovs_mutex_lock(&dummy_dev->mutex);
861 netdev_dummy_queue_packet(dummy_dev, packet);
862 ovs_mutex_unlock(&dummy_dev->mutex);
865 unixctl_command_reply(conn, NULL);
/* Drop the reference taken by netdev_from_name(). */
868 netdev_close(netdev);
/* Sets or clears NETDEV_UP on 'dev' according to 'admin_state'.  Called with
 * the device mutex held; the old flags are discarded. */
872 netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
873 OVS_REQUIRES(dev->mutex)
875 enum netdev_flags old_flags;
878 netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
880 netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
/* unixctl "netdev-dummy/set-admin-state" handler: with a netdev argument,
 * sets that one device's admin state; without, applies "up"/"down" to every
 * dummy device on dummy_list. */
885 netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
886 const char *argv[], void *aux OVS_UNUSED)
/* Last argument is the state; anything but up/down is rejected. */
890 if (!strcasecmp(argv[argc - 1], "up")) {
892 } else if ( !strcasecmp(argv[argc - 1], "down")) {
895 unixctl_command_reply_error(conn, "Invalid Admin State");
/* Single-device form: argv[1] names the netdev. */
900 struct netdev *netdev = netdev_from_name(argv[1]);
901 if (netdev && is_dummy_class(netdev->netdev_class)) {
902 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
904 ovs_mutex_lock(&dummy_dev->mutex);
905 netdev_dummy_set_admin_state__(dummy_dev, up);
906 ovs_mutex_unlock(&dummy_dev->mutex);
908 netdev_close(netdev);
910 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
911 netdev_close(netdev);
/* All-devices form: walk the global list. */
915 struct netdev_dummy *netdev;
917 ovs_mutex_lock(&dummy_list_mutex);
918 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
919 ovs_mutex_lock(&netdev->mutex);
920 netdev_dummy_set_admin_state__(netdev, up);
921 ovs_mutex_unlock(&netdev->mutex);
923 ovs_mutex_unlock(&dummy_list_mutex);
925 unixctl_command_reply(conn, "OK");
/* Registers the dummy provider and its unixctl commands.  With 'override',
 * replaces every existing netdev provider (except "patch") with a dummy
 * clone of the same type name; otherwise just registers "dummy" itself.
 * Also registers tunnel vport types so dummy tunnels work. */
929 netdev_dummy_register(bool override)
931 unixctl_command_register("netdev-dummy/receive", "NAME PACKET|FLOW...",
932 2, INT_MAX, netdev_dummy_receive, NULL);
933 unixctl_command_register("netdev-dummy/set-admin-state",
934 "[netdev] up|down", 1, 2,
935 netdev_dummy_set_admin_state, NULL);
/* Override path: re-register each existing type as a dummy clone. */
942 netdev_enumerate_types(&types);
943 SSET_FOR_EACH (type, &types) {
944 if (!strcmp(type, "patch")) {
947 if (!netdev_unregister_provider(type)) {
948 struct netdev_class *class;
951 class = xmemdup(&dummy_class, sizeof dummy_class);
952 class->type = xstrdup(type);
953 error = netdev_register_provider(class);
955 VLOG_ERR("%s: failed to register netdev provider (%s)",
956 type, ovs_strerror(error));
/* On failure, free the duplicated type name (class leak is
 * presumably handled by missing lines -- confirm). */
957 free(CONST_CAST(char *, class->type));
962 sset_destroy(&types);
964 netdev_register_provider(&dummy_class);
966 netdev_vport_tunnel_register();