/*
 * Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
25 #include "netdev-provider.h"
26 #include "netdev-vport.h"
28 #include "ofp-print.h"
31 #include "pcap-file.h"
32 #include "poll-loop.h"
36 #include "unaligned.h"
/* NOTE(review): this file appears to be a partial extraction of Open
 * vSwitch's lib/netdev-dummy.c.  The numeric prefix on each line and the
 * gaps between those numbers are extraction residue -- confirm against the
 * full upstream source before editing any logic. */
41 VLOG_DEFINE_THIS_MODULE(netdev_dummy);
/* Fragment of "struct dummy_stream": per-connection state for one stream
 * accepted on a dummy device's pstream (the struct's other members, e.g.
 * the rxbuf/txq used below, are not visible in this extract). */
44 struct stream *stream;
49 /* Protects 'dummy_list'. */
50 static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;
52 /* Contains all 'struct dummy_dev's. */
53 static struct list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
54 = LIST_INITIALIZER(&dummy_list);
/* Members of "struct netdev_dummy" (the struct's opening declaration is not
 * visible in this extract). */
60 struct list list_node OVS_GUARDED_BY(dummy_list_mutex); /* In 'dummy_list'. */
62 /* Protects all members below. */
63 struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);
65 uint8_t hwaddr[ETH_ADDR_LEN] OVS_GUARDED; /* Ethernet address. */
67 struct netdev_stats stats OVS_GUARDED;
68 enum netdev_flags flags OVS_GUARDED;
69 unsigned int change_seq OVS_GUARDED; /* Bumped by netdev_dummy_changed(). */
70 int ifindex OVS_GUARDED; /* -EOPNOTSUPP when not configured (see construct). */
72 struct pstream *pstream OVS_GUARDED; /* Passive listener from "pstream" config, or NULL. */
73 struct dummy_stream *streams OVS_GUARDED; /* Array of accepted connections. */
74 size_t n_streams OVS_GUARDED; /* Number of elements in 'streams'. */
76 FILE *tx_pcap, *rx_pcap OVS_GUARDED; /* pcap dump files, or NULL; may alias. */
78 struct list rxes OVS_GUARDED; /* List of child "netdev_rx_dummy"s. */
81 /* Max 'recv_queue_len' in struct netdev_dummy. */
82 #define NETDEV_DUMMY_MAX_QUEUE 100
/* Per-rx-handle state for a dummy device.  NOTE(review): this struct is
 * incomplete in this extract (the embedded "struct netdev_rx up;" member
 * referenced by netdev_rx_dummy_cast() and the closing brace are missing). */
84 struct netdev_rx_dummy {
86 struct list node; /* In netdev_dummy's "rxes" list. */
87 struct list recv_queue; /* Queue of struct ofpbuf packets awaiting recv. */
88 int recv_queue_len; /* list_size(&recv_queue). */
/* Forward declarations for functions referenced before their definitions. */
92 static unixctl_cb_func netdev_dummy_set_admin_state;
93 static int netdev_dummy_construct(struct netdev *);
94 static void netdev_dummy_changed(struct netdev_dummy *netdev)
95 OVS_REQUIRES(netdev->mutex);
96 static void netdev_dummy_queue_packet(struct netdev_dummy *, struct ofpbuf *);
98 static void dummy_stream_close(struct dummy_stream *);
/* Returns true if 'class' is (a copy of) the dummy netdev class, identified
 * by its construct function pointer. */
101 is_dummy_class(const struct netdev_class *class)
103 return class->construct == netdev_dummy_construct;
/* Downcasts 'netdev' to its containing netdev_dummy, asserting first that it
 * really belongs to the dummy class. */
106 static struct netdev_dummy *
107 netdev_dummy_cast(const struct netdev *netdev)
109 ovs_assert(is_dummy_class(netdev_get_class(netdev)));
110 return CONTAINER_OF(netdev, struct netdev_dummy, up);
/* Downcasts 'rx' to its containing netdev_rx_dummy, with the same class
 * assertion on the parent netdev. */
113 static struct netdev_rx_dummy *
114 netdev_rx_dummy_cast(const struct netdev_rx *rx)
116 ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
117 return CONTAINER_OF(rx, struct netdev_rx_dummy, up);
/* Periodic work for all dummy devices: accepts new stream connections on
 * each device's pstream and services existing connections (flushing queued
 * tx, reading framed rx).  NOTE(review): many interior lines (braces,
 * conditions, declarations of 'error', 'i', 'n', 'retval', 'frame_len') are
 * missing from this extract. */
121 netdev_dummy_run(void)
123 struct netdev_dummy *dev;
125 ovs_mutex_lock(&dummy_list_mutex);
126 LIST_FOR_EACH (dev, list_node, &dummy_list) {
129 ovs_mutex_lock(&dev->mutex);
/* Accept one pending connection, growing the 'streams' array by one. */
132 struct stream *new_stream;
135 error = pstream_accept(dev->pstream, &new_stream);
137 struct dummy_stream *s;
139 dev->streams = xrealloc(dev->streams,
140 ((dev->n_streams + 1)
141 * sizeof *dev->streams));
142 s = &dev->streams[dev->n_streams++];
143 s->stream = new_stream;
144 ofpbuf_init(&s->rxbuf, 2048);
/* EAGAIN just means "nothing to accept"; any other error is fatal for the
 * listener, which is closed. */
146 } else if (error != EAGAIN) {
147 VLOG_WARN("%s: accept failed (%s)",
148 pstream_get_name(dev->pstream), ovs_strerror(error));
149 pstream_close(dev->pstream);
/* Service each accepted connection in turn. */
154 for (i = 0; i < dev->n_streams; i++) {
155 struct dummy_stream *s = &dev->streams[i];
159 stream_run(s->stream);
/* Flush the head of the connection's tx queue, if any. */
161 if (!list_is_empty(&s->txq)) {
162 struct ofpbuf *txbuf;
165 txbuf = ofpbuf_from_list(list_front(&s->txq));
166 retval = stream_send(s->stream, txbuf->data, txbuf->size);
168 ofpbuf_pull(txbuf, retval);
170 list_remove(&txbuf->list_node);
171 ofpbuf_delete(txbuf);
173 } else if (retval != -EAGAIN) {
/* Receive side: each frame on the wire is prefixed by a 2-byte big-endian
 * length.  First read the length header... */
179 if (s->rxbuf.size < 2) {
180 n = 2 - s->rxbuf.size;
/* ...then validate it and read the rest of the frame. */
184 frame_len = ntohs(get_unaligned_be16(s->rxbuf.data));
185 if (frame_len < ETH_HEADER_LEN) {
189 n = (2 + frame_len) - s->rxbuf.size;
196 ofpbuf_prealloc_tailroom(&s->rxbuf, n);
197 retval = stream_recv(s->stream, ofpbuf_tail(&s->rxbuf), n);
199 s->rxbuf.size += retval;
/* Complete frame received: strip the 2-byte header and queue a copy of the
 * payload on the device's rx queues. */
200 if (retval == n && s->rxbuf.size > 2) {
201 ofpbuf_pull(&s->rxbuf, 2);
202 netdev_dummy_queue_packet(dev,
203 ofpbuf_clone(&s->rxbuf));
204 ofpbuf_clear(&s->rxbuf);
206 } else if (retval != -EAGAIN) {
207 error = (retval < 0 ? -retval
208 : s->rxbuf.size ? EPROTO
/* On error, close this connection and compact the array by moving the last
 * element into the freed slot. */
214 VLOG_DBG("%s: closing connection (%s)",
215 stream_get_name(s->stream),
216 ovs_retval_to_string(error));
217 dummy_stream_close(&dev->streams[i]);
218 dev->streams[i] = dev->streams[--dev->n_streams];
222 ovs_mutex_unlock(&dev->mutex);
224 ovs_mutex_unlock(&dummy_list_mutex);
/* Releases all resources held by connection 's': the stream itself, its rx
 * buffer, and any packets still queued for transmission. */
228 dummy_stream_close(struct dummy_stream *s)
230 stream_close(s->stream);
231 ofpbuf_uninit(&s->rxbuf);
232 ofpbuf_list_delete(&s->txq);
/* Poll-loop registration mirroring netdev_dummy_run(): arranges a wakeup
 * whenever any dummy device's pstream or one of its streams needs service.
 * NOTE(review): some interior lines are missing from this extract. */
236 netdev_dummy_wait(void)
238 struct netdev_dummy *dev;
240 ovs_mutex_lock(&dummy_list_mutex);
241 LIST_FOR_EACH (dev, list_node, &dummy_list) {
244 ovs_mutex_lock(&dev->mutex);
246 pstream_wait(dev->pstream);
248 for (i = 0; i < dev->n_streams; i++) {
249 struct dummy_stream *s = &dev->streams[i];
251 stream_run_wait(s->stream);
/* Only wait for writability if there is actually something queued to send. */
252 if (!list_is_empty(&s->txq)) {
253 stream_send_wait(s->stream);
255 stream_recv_wait(s->stream);
257 ovs_mutex_unlock(&dev->mutex);
259 ovs_mutex_unlock(&dummy_list_mutex);
/* netdev class "alloc" callback: zeroed allocation of the dummy's private
 * struct (the embedded 'up' is returned to the framework; return line not
 * visible in this extract). */
262 static struct netdev *
263 netdev_dummy_alloc(void)
265 struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
/* netdev class "construct" callback: initializes a freshly allocated dummy
 * device and links it into the global 'dummy_list'. */
270 netdev_dummy_construct(struct netdev *netdev_)
/* Atomically incremented counter so each dummy gets a unique MAC; the seed
 * 0xaa550000 yields addresses of the form aa:55:xx:xx:xx:xx. */
272 static atomic_uint next_n = ATOMIC_VAR_INIT(0xaa550000);
273 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
276 atomic_add(&next_n, 1, &n);
278 ovs_mutex_init(&netdev->mutex);
279 ovs_mutex_lock(&netdev->mutex);
280 netdev->hwaddr[0] = 0xaa;
281 netdev->hwaddr[1] = 0x55;
282 netdev->hwaddr[2] = n >> 24;
283 netdev->hwaddr[3] = n >> 16;
284 netdev->hwaddr[4] = n >> 8;
285 netdev->hwaddr[5] = n;
288 netdev->change_seq = 1;
/* Negative errno as sentinel: "ifindex" not configured yet. */
289 netdev->ifindex = -EOPNOTSUPP;
291 netdev->pstream = NULL;
292 netdev->streams = NULL;
293 netdev->n_streams = 0;
295 list_init(&netdev->rxes);
296 ovs_mutex_unlock(&netdev->mutex);
298 ovs_mutex_lock(&dummy_list_mutex);
299 list_push_back(&dummy_list, &netdev->list_node);
300 ovs_mutex_unlock(&dummy_list_mutex);
/* netdev class "destruct" callback: unlinks the device from 'dummy_list',
 * then tears down its listener, all accepted connections, and its mutex. */
306 netdev_dummy_destruct(struct netdev *netdev_)
308 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
311 ovs_mutex_lock(&dummy_list_mutex);
312 list_remove(&netdev->list_node);
313 ovs_mutex_unlock(&dummy_list_mutex);
315 ovs_mutex_lock(&netdev->mutex);
316 pstream_close(netdev->pstream);
317 for (i = 0; i < netdev->n_streams; i++) {
318 dummy_stream_close(&netdev->streams[i]);
320 free(netdev->streams);
321 ovs_mutex_unlock(&netdev->mutex);
322 ovs_mutex_destroy(&netdev->mutex);
/* netdev class "dealloc" callback: frees the storage obtained by
 * netdev_dummy_alloc() (free() call not visible in this extract). */
326 netdev_dummy_dealloc(struct netdev *netdev_)
328 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
/* "get_config" callback: reports the configurable state ("ifindex",
 * "pstream") back into 'args'. */
334 netdev_dummy_get_config(const struct netdev *netdev_, struct smap *args)
336 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
338 ovs_mutex_lock(&netdev->mutex);
/* Negative ifindex is the "unset" sentinel and is not reported. */
340 if (netdev->ifindex >= 0) {
341 smap_add_format(args, "ifindex", "%d", netdev->ifindex);
344 if (netdev->pstream) {
345 smap_add(args, "pstream", pstream_get_name(netdev->pstream));
348 ovs_mutex_unlock(&netdev->mutex);
/* "set_config" callback: applies "ifindex", "pstream", and pcap-related
 * keys from 'args'.  NOTE(review): several interior lines are missing from
 * this extract. */
353 netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args)
355 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
359 ovs_mutex_lock(&netdev->mutex);
360 netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
/* Reopen the passive listener only if its configured name changed. */
362 pstream = smap_get(args, "pstream");
365 || strcmp(pstream_get_name(netdev->pstream), pstream)) {
366 pstream_close(netdev->pstream);
367 netdev->pstream = NULL;
372 error = pstream_open(pstream, &netdev->pstream, DSCP_DEFAULT);
374 VLOG_WARN("%s: open failed (%s)",
375 pstream, ovs_strerror(error));
/* Close any previously configured pcap dump files; "pcap" opens one shared
 * file for both directions, otherwise "rx_pcap"/"tx_pcap" are independent. */
380 if (netdev->rx_pcap) {
381 fclose(netdev->rx_pcap);
383 if (netdev->tx_pcap && netdev->tx_pcap != netdev->rx_pcap) {
384 fclose(netdev->tx_pcap);
386 netdev->rx_pcap = netdev->tx_pcap = NULL;
387 pcap = smap_get(args, "pcap");
389 netdev->rx_pcap = netdev->tx_pcap = pcap_open(pcap, "ab");
391 const char *rx_pcap = smap_get(args, "rx_pcap");
392 const char *tx_pcap = smap_get(args, "tx_pcap");
395 netdev->rx_pcap = pcap_open(rx_pcap, "ab");
398 netdev->tx_pcap = pcap_open(tx_pcap, "ab");
402 ovs_mutex_unlock(&netdev->mutex);
/* rx "alloc" callback: zeroed allocation of the rx handle (return line not
 * visible in this extract). */
407 static struct netdev_rx *
408 netdev_dummy_rx_alloc(void)
410 struct netdev_rx_dummy *rx = xzalloc(sizeof *rx);
/* rx "construct" callback: registers the handle on its parent device's
 * 'rxes' list and starts with an empty receive queue. */
415 netdev_dummy_rx_construct(struct netdev_rx *rx_)
417 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
418 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
420 ovs_mutex_lock(&netdev->mutex);
421 list_push_back(&netdev->rxes, &rx->node);
422 list_init(&rx->recv_queue);
423 rx->recv_queue_len = 0;
424 ovs_mutex_unlock(&netdev->mutex);
/* rx "destruct" callback: unregisters the handle and frees any packets
 * still queued on it. */
430 netdev_dummy_rx_destruct(struct netdev_rx *rx_)
432 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
433 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
435 ovs_mutex_lock(&netdev->mutex);
436 list_remove(&rx->node);
437 ofpbuf_list_delete(&rx->recv_queue);
438 ovs_mutex_unlock(&netdev->mutex);
/* rx "dealloc" callback: frees the handle's storage (free() call not
 * visible in this extract). */
442 netdev_dummy_rx_dealloc(struct netdev_rx *rx_)
444 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
/* rx "recv" callback: pops the next queued packet, copies it into 'buffer'
 * if it fits in 'size', and updates rx stats.  NOTE(review): the empty-queue
 * and too-large-packet return paths are missing from this extract. */
450 netdev_dummy_rx_recv(struct netdev_rx *rx_, void *buffer, size_t size)
452 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
453 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
454 struct ofpbuf *packet;
457 ovs_mutex_lock(&netdev->mutex);
458 if (!list_is_empty(&rx->recv_queue)) {
459 packet = ofpbuf_from_list(list_pop_front(&rx->recv_queue));
460 rx->recv_queue_len--;
464 ovs_mutex_unlock(&netdev->mutex);
470 if (packet->size <= size) {
471 memcpy(buffer, packet->data, packet->size);
472 retval = packet->size;
/* Stats are guarded by the device mutex, so re-take it just for the
 * counter updates. */
474 ovs_mutex_lock(&netdev->mutex);
475 netdev->stats.rx_packets++;
476 netdev->stats.rx_bytes += packet->size;
477 ovs_mutex_unlock(&netdev->mutex);
/* The queue owned the packet; it is consumed regardless of outcome. */
481 ofpbuf_delete(packet);
/* rx "wait" callback: wakes the poll loop immediately if a packet is
 * already queued. */
487 netdev_dummy_rx_wait(struct netdev_rx *rx_)
489 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
490 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
492 ovs_mutex_lock(&netdev->mutex);
493 if (!list_is_empty(&rx->recv_queue)) {
494 poll_immediate_wake();
496 ovs_mutex_unlock(&netdev->mutex);
/* rx "drain" callback: discards all queued packets. */
500 netdev_dummy_rx_drain(struct netdev_rx *rx_)
502 struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);
503 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
505 ovs_mutex_lock(&netdev->mutex);
506 ofpbuf_list_delete(&rx->recv_queue);
507 rx->recv_queue_len = 0;
508 ovs_mutex_unlock(&netdev->mutex);
/* "send" callback: validates the frame length against the Ethernet minimum
 * and the device MTU (allowing one VLAN header), updates tx stats, dumps to
 * the tx pcap file if configured, and forwards a length-prefixed copy to
 * every connected stream.  NOTE(review): error-return lines and some braces
 * are missing from this extract. */
514 netdev_dummy_send(struct netdev *netdev, const void *buffer, size_t size)
516 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
519 if (size < ETH_HEADER_LEN) {
522 const struct eth_header *eth = buffer;
525 ovs_mutex_lock(&dev->mutex);
526 max_size = dev->mtu + ETH_HEADER_LEN;
527 ovs_mutex_unlock(&dev->mutex);
/* A single 802.1Q tag is allowed on top of the MTU. */
529 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
530 max_size += VLAN_HEADER_LEN;
532 if (size > max_size) {
537 ovs_mutex_lock(&dev->mutex);
538 dev->stats.tx_packets++;
539 dev->stats.tx_bytes += size;
/* ofpbuf_use_const wraps 'buffer' without copying, just for pcap_write. */
542 struct ofpbuf packet;
544 ofpbuf_use_const(&packet, buffer, size);
545 pcap_write(dev->tx_pcap, &packet);
546 fflush(dev->tx_pcap);
/* Queue a copy for each connection, framed with the same 2-byte big-endian
 * length prefix that netdev_dummy_run() parses; connections whose tx queue
 * is full are skipped. */
549 for (i = 0; i < dev->n_streams; i++) {
550 struct dummy_stream *s = &dev->streams[i];
552 if (list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
555 b = ofpbuf_clone_data_with_headroom(buffer, size, 2);
556 put_unaligned_be16(ofpbuf_push_uninit(b, 2), htons(size));
557 list_push_back(&s->txq, &b->list_node);
560 ovs_mutex_unlock(&dev->mutex);
/* "set_etheraddr" callback: stores 'mac' and signals a change only if the
 * address actually differs. */
566 netdev_dummy_set_etheraddr(struct netdev *netdev,
567 const uint8_t mac[ETH_ADDR_LEN])
569 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
571 ovs_mutex_lock(&dev->mutex);
572 if (!eth_addr_equals(dev->hwaddr, mac)) {
573 memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
574 netdev_dummy_changed(dev);
576 ovs_mutex_unlock(&dev->mutex);
/* "get_etheraddr" callback: copies the current address out under the
 * device mutex. */
582 netdev_dummy_get_etheraddr(const struct netdev *netdev,
583 uint8_t mac[ETH_ADDR_LEN])
585 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
587 ovs_mutex_lock(&dev->mutex);
588 memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
589 ovs_mutex_unlock(&dev->mutex);
/* Simple mutex-guarded accessors.  NOTE(review): the actual copy/assign
 * statements between lock and unlock are missing from this extract for the
 * mtu and stats functions. */
595 netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
597 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
599 ovs_mutex_lock(&dev->mutex);
601 ovs_mutex_unlock(&dev->mutex);
607 netdev_dummy_set_mtu(const struct netdev *netdev, int mtu)
609 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
611 ovs_mutex_lock(&dev->mutex);
613 ovs_mutex_unlock(&dev->mutex);
619 netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
621 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
623 ovs_mutex_lock(&dev->mutex);
625 ovs_mutex_unlock(&dev->mutex);
631 netdev_dummy_set_stats(struct netdev *netdev, const struct netdev_stats *stats)
633 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
635 ovs_mutex_lock(&dev->mutex);
637 ovs_mutex_unlock(&dev->mutex);
/* "get_ifindex" callback: returns the configured ifindex (or the
 * -EOPNOTSUPP sentinel set at construction). */
643 netdev_dummy_get_ifindex(const struct netdev *netdev)
645 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
648 ovs_mutex_lock(&dev->mutex);
649 ifindex = dev->ifindex;
650 ovs_mutex_unlock(&dev->mutex);
/* Internal flag-update helper; caller must hold netdev->mutex.  Rejects any
 * flag other than NETDEV_UP / NETDEV_PROMISC and signals a change only when
 * the flag word actually changed.  NOTE(review): the "|= on" line and
 * return statements are missing from this extract. */
656 netdev_dummy_update_flags__(struct netdev_dummy *netdev,
657 enum netdev_flags off, enum netdev_flags on,
658 enum netdev_flags *old_flagsp)
659 OVS_REQUIRES(netdev->mutex)
661 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
665 *old_flagsp = netdev->flags;
667 netdev->flags &= ~off;
668 if (*old_flagsp != netdev->flags) {
669 netdev_dummy_changed(netdev);
/* "update_flags" callback: locking wrapper around the helper above. */
676 netdev_dummy_update_flags(struct netdev *netdev_,
677 enum netdev_flags off, enum netdev_flags on,
678 enum netdev_flags *old_flagsp)
680 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
683 ovs_mutex_lock(&netdev->mutex);
684 error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
685 ovs_mutex_unlock(&netdev->mutex);
/* "change_seq" callback: reads the change sequence number under the mutex. */
691 netdev_dummy_change_seq(const struct netdev *netdev_)
693 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
694 unsigned int change_seq;
696 ovs_mutex_lock(&netdev->mutex);
697 change_seq = netdev->change_seq;
698 ovs_mutex_unlock(&netdev->mutex);
703 /* Helper functions. */
/* Bumps dev->change_seq to announce a device change; the zero check
 * presumably skips the reserved value 0 when the counter wraps (the
 * increment itself is not visible in this extract -- TODO confirm). */
706 netdev_dummy_changed(struct netdev_dummy *dev)
709 if (!dev->change_seq) {
/* The dummy netdev class: positional initializer whose slot order must match
 * "struct netdev_class" in netdev-provider.h.  NULL entries are callbacks
 * this class does not implement.  NOTE(review): some slots (e.g. type/init
 * at the top, alloc, run/wait) are missing from this extract. */
714 static const struct netdev_class dummy_class = {
721 netdev_dummy_construct,
722 netdev_dummy_destruct,
723 netdev_dummy_dealloc,
724 netdev_dummy_get_config,
725 netdev_dummy_set_config,
726 NULL, /* get_tunnel_config */
728 netdev_dummy_send, /* send */
729 NULL, /* send_wait */
731 netdev_dummy_set_etheraddr,
732 netdev_dummy_get_etheraddr,
733 netdev_dummy_get_mtu,
734 netdev_dummy_set_mtu,
735 netdev_dummy_get_ifindex,
736 NULL, /* get_carrier */
737 NULL, /* get_carrier_resets */
738 NULL, /* get_miimon */
739 netdev_dummy_get_stats,
740 netdev_dummy_set_stats,
742 NULL, /* get_features */
743 NULL, /* set_advertisements */
745 NULL, /* set_policing */
746 NULL, /* get_qos_types */
747 NULL, /* get_qos_capabilities */
750 NULL, /* get_queue */
751 NULL, /* set_queue */
752 NULL, /* delete_queue */
753 NULL, /* get_queue_stats */
754 NULL, /* queue_dump_start */
755 NULL, /* queue_dump_next */
756 NULL, /* queue_dump_done */
757 NULL, /* dump_queue_stats */
762 NULL, /* add_router */
763 NULL, /* get_next_hop */
764 NULL, /* get_status */
765 NULL, /* arp_lookup */
767 netdev_dummy_update_flags,
769 netdev_dummy_change_seq,
771 netdev_dummy_rx_alloc,
772 netdev_dummy_rx_construct,
773 netdev_dummy_rx_destruct,
774 netdev_dummy_rx_dealloc,
775 netdev_dummy_rx_recv,
776 netdev_dummy_rx_wait,
777 netdev_dummy_rx_drain,
/* Builds an Ethernet packet from string 's': first tries to parse it as a
 * hex packet dump; failing that, parses it as a datapath flow key and
 * composes a matching packet.  Returns a heap-allocated ofpbuf the caller
 * owns, or (presumably) NULL on parse failure -- the error-return lines are
 * missing from this extract. */
780 static struct ofpbuf *
781 eth_from_packet_or_flow(const char *s)
783 enum odp_key_fitness fitness;
784 struct ofpbuf *packet;
785 struct ofpbuf odp_key;
/* Fast path: 's' is a raw hex packet. */
789 if (!eth_from_hex(s, &packet)) {
793 /* Convert string to datapath key.
795 * It would actually be nicer to parse an OpenFlow-like flow key here, but
796 * the code for that currently calls exit() on parse error. We have to
797 * settle for parsing a datapath key for now.
799 ofpbuf_init(&odp_key, 0);
800 error = odp_flow_from_string(s, NULL, &odp_key, NULL);
802 ofpbuf_uninit(&odp_key);
806 /* Convert odp_key to flow. */
807 fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
808 if (fitness == ODP_FIT_ERROR) {
809 ofpbuf_uninit(&odp_key);
/* Synthesize a packet matching 'flow'. */
813 packet = ofpbuf_new(0);
814 flow_compose(packet, &flow);
816 ofpbuf_uninit(&odp_key);
/* Appends 'packet' to 'rx's receive queue, taking ownership of it. */
821 netdev_dummy_queue_packet__(struct netdev_rx_dummy *rx, struct ofpbuf *packet)
823 list_push_back(&rx->recv_queue, &packet->list_node);
824 rx->recv_queue_len++;
/* Delivers 'packet' (taking ownership) to every rx handle on 'dummy' whose
 * queue is not full, also dumping it to the rx pcap file if configured.
 * NOTE(review): interior lines are missing; the visible 'prev' pattern
 * suggests clones go to all-but-the-last handle and the original to the
 * last, with ofpbuf_delete() as the no-receiver fallback -- confirm against
 * the full source. */
828 netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct ofpbuf *packet)
829 OVS_REQUIRES(dummy->mutex)
831 struct netdev_rx_dummy *rx, *prev;
833 if (dummy->rx_pcap) {
834 pcap_write(dummy->rx_pcap, packet);
835 fflush(dummy->rx_pcap);
838 LIST_FOR_EACH (rx, node, &dummy->rxes) {
839 if (rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
841 netdev_dummy_queue_packet__(prev, ofpbuf_clone(packet));
847 netdev_dummy_queue_packet__(prev, packet);
849 ofpbuf_delete(packet);
/* unixctl handler for "netdev-dummy/receive NAME PACKET|FLOW...": injects
 * each argument, parsed by eth_from_packet_or_flow(), into the named dummy
 * device's receive path. */
854 netdev_dummy_receive(struct unixctl_conn *conn,
855 int argc, const char *argv[], void *aux OVS_UNUSED)
857 struct netdev_dummy *dummy_dev;
858 struct netdev *netdev;
861 netdev = netdev_from_name(argv[1]);
862 if (!netdev || !is_dummy_class(netdev->netdev_class)) {
863 unixctl_command_reply_error(conn, "no such dummy netdev");
866 dummy_dev = netdev_dummy_cast(netdev);
/* Arguments 2..argc-1 are individual packets or flow descriptions. */
868 for (i = 2; i < argc; i++) {
869 struct ofpbuf *packet;
871 packet = eth_from_packet_or_flow(argv[i]);
873 unixctl_command_reply_error(conn, "bad packet syntax");
/* netdev_dummy_queue_packet() takes ownership of 'packet'. */
877 ovs_mutex_lock(&dummy_dev->mutex);
878 netdev_dummy_queue_packet(dummy_dev, packet);
879 ovs_mutex_unlock(&dummy_dev->mutex);
882 unixctl_command_reply(conn, NULL);
885 netdev_close(netdev);
/* Sets or clears NETDEV_UP on 'dev' according to 'admin_state'; caller must
 * hold dev->mutex. */
889 netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
890 OVS_REQUIRES(dev->mutex)
892 enum netdev_flags old_flags;
895 netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
897 netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
/* unixctl handler for "netdev-dummy/set-admin-state [netdev] up|down":
 * the last argument selects up/down; with a netdev name the state is
 * applied to that device only, otherwise to every dummy device.
 * NOTE(review): the argc-based branch structure is partly missing from this
 * extract. */
902 netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
903 const char *argv[], void *aux OVS_UNUSED)
907 if (!strcasecmp(argv[argc - 1], "up")) {
909 } else if ( !strcasecmp(argv[argc - 1], "down")) {
912 unixctl_command_reply_error(conn, "Invalid Admin State");
/* Named-device form. */
917 struct netdev *netdev = netdev_from_name(argv[1]);
918 if (netdev && is_dummy_class(netdev->netdev_class)) {
919 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
921 ovs_mutex_lock(&dummy_dev->mutex);
922 netdev_dummy_set_admin_state__(dummy_dev, up);
923 ovs_mutex_unlock(&dummy_dev->mutex);
925 netdev_close(netdev);
927 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
928 netdev_close(netdev);
/* All-devices form: walk 'dummy_list' under its mutex. */
932 struct netdev_dummy *netdev;
934 ovs_mutex_lock(&dummy_list_mutex);
935 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
936 ovs_mutex_lock(&netdev->mutex);
937 netdev_dummy_set_admin_state__(netdev, up);
938 ovs_mutex_unlock(&netdev->mutex);
940 ovs_mutex_unlock(&dummy_list_mutex);
942 unixctl_command_reply(conn, "OK");
946 netdev_dummy_register(bool override)
948 unixctl_command_register("netdev-dummy/receive", "NAME PACKET|FLOW...",
949 2, INT_MAX, netdev_dummy_receive, NULL);
950 unixctl_command_register("netdev-dummy/set-admin-state",
951 "[netdev] up|down", 1, 2,
952 netdev_dummy_set_admin_state, NULL);
959 netdev_enumerate_types(&types);
960 SSET_FOR_EACH (type, &types) {
961 if (!strcmp(type, "patch")) {
964 if (!netdev_unregister_provider(type)) {
965 struct netdev_class *class;
968 class = xmemdup(&dummy_class, sizeof dummy_class);
969 class->type = xstrdup(type);
970 error = netdev_register_provider(class);
972 VLOG_ERR("%s: failed to register netdev provider (%s)",
973 type, ovs_strerror(error));
974 free(CONST_CAST(char *, class->type));
979 sset_destroy(&types);
981 netdev_register_provider(&dummy_class);
983 netdev_vport_tunnel_register();