/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include "dpif-provider.h"
#include "dynamic-string.h"
#include "netdev-vport.h"
#include "odp-execute.h"
#include "ofp-print.h"
#include "poll-loop.h"

VLOG_DEFINE_THIS_MODULE(dpif_netdev);
/* Configuration parameters. */
enum { MAX_PORTS = 256 };       /* Maximum number of ports. */
enum { MAX_FLOWS = 65536 };     /* Maximum number of flows in flow table. */

/* Enough headroom to add a vlan tag, plus an extra 2 bytes to allow IP
 * headers to be aligned on a 4-byte boundary. */
enum { DP_NETDEV_HEADROOM = 2 + VLAN_HEADER_LEN };
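/* For example: with the full headroom reserved, an untagged frame's data
 * starts 2 + VLAN_HEADER_LEN = 6 bytes into the buffer, so its IP header
 * lands at byte 6 + ETH_HEADER_LEN = 20, a multiple of 4.  Pushing a 4-byte
 * VLAN tag consumes headroom and moves the data back to offset 2, leaving
 * the IP header at 2 + 18 = 20, still aligned. */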
enum { N_QUEUES = 2 };          /* Number of queues for dpif_recv(). */
enum { MAX_QUEUE_LEN = 128 };   /* Maximum number of packets per queue. */
enum { QUEUE_MASK = MAX_QUEUE_LEN - 1 };
BUILD_ASSERT_DECL(IS_POW2(MAX_QUEUE_LEN));
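/* Each queue is a ring buffer: 'head' and 'tail' only ever increase and are
 * masked down to an array index on each access, which is why MAX_QUEUE_LEN
 * must be a power of 2.  The idiom used throughout this file is:
 *
 *     enqueue: q->upcalls[q->head++ & QUEUE_MASK]
 *     dequeue: q->upcalls[q->tail++ & QUEUE_MASK]
 *
 * so 'q->head - q->tail' is the number of queued upcalls, a result that
 * stays correct even when the unsigned counters wrap around. */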
struct dp_netdev_upcall {
    struct dpif_upcall upcall;  /* Queued upcall information. */
    struct ofpbuf buf;          /* ofpbuf instance for upcall.packet. */
};

struct dp_netdev_queue {
    struct dp_netdev_upcall upcalls[MAX_QUEUE_LEN];
    unsigned int head, tail;
};
/* Datapath based on the network device interface from netdev.h. */
struct dp_netdev {
    const struct dpif_class *class;
    char *name;
    int open_cnt;
    bool destroyed;
    int max_mtu;                /* Maximum MTU of any port added so far. */

    struct dp_netdev_queue queues[N_QUEUES];
    struct hmap flow_table;     /* Flow table. */

    /* Statistics. */
    long long int n_hit;        /* Number of flow table matches. */
    long long int n_missed;     /* Number of flow table misses. */
    long long int n_lost;       /* Number of misses not passed to client. */

    /* Ports. */
    struct dp_netdev_port *ports[MAX_PORTS];
    struct list port_list;
    unsigned int serial;
};
/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    odp_port_t port_no;         /* Index into dp_netdev's 'ports'. */
    struct list node;           /* Element in dp_netdev's 'port_list'. */
    struct netdev *netdev;
    struct netdev_saved_flags *sf;
    struct netdev_rx *rx;
    char *type;                 /* Port type as requested by user. */
};
/* A flow in dp_netdev's 'flow_table'. */
struct dp_netdev_flow {
    struct hmap_node node;      /* Element in dp_netdev's 'flow_table'. */
    struct flow key;

    /* Statistics. */
    long long int used;         /* Last used time, in monotonic msecs. */
    long long int packet_count; /* Number of packets matched. */
    long long int byte_count;   /* Number of bytes matched. */
    uint8_t tcp_flags;          /* Bitwise-OR of seen tcp_flags values. */

    /* Actions. */
    struct nlattr *actions;
    size_t actions_len;
};
/* Interface to netdev-based datapath. */
struct dpif_netdev {
    struct dpif dpif;
    struct dp_netdev *dp;
    unsigned int dp_serial;
};

/* All netdev-based datapaths. */
static struct shash dp_netdevs = SHASH_INITIALIZER(&dp_netdevs);
/* Global lock for all data. */
static pthread_mutex_t dp_netdev_mutex = PTHREAD_MUTEX_INITIALIZER;
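/* Nearly every function below acquires this mutex on entry and releases it
 * before returning, so the whole userspace datapath runs effectively
 * single-threaded and all datapaths in the process share the one lock. */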
static int get_port_by_number(struct dp_netdev *, odp_port_t port_no,
                              struct dp_netdev_port **portp);
static int get_port_by_name(struct dp_netdev *, const char *devname,
                            struct dp_netdev_port **portp);
static void dp_netdev_free(struct dp_netdev *);
static void dp_netdev_flow_flush(struct dp_netdev *);
static int do_add_port(struct dp_netdev *, const char *devname,
                       const char *type, odp_port_t port_no);
static int do_del_port(struct dp_netdev *, odp_port_t port_no);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
                            bool create, struct dpif **);
static int dp_netdev_output_userspace(struct dp_netdev *, const struct ofpbuf *,
                                      int queue_no, const struct flow *,
                                      const struct nlattr *userdata);
static void dp_netdev_execute_actions(struct dp_netdev *,
                                      struct ofpbuf *, struct flow *,
                                      const struct nlattr *actions,
                                      size_t actions_len);
static void dp_netdev_port_input(struct dp_netdev *dp,
                                 struct dp_netdev_port *port,
                                 struct ofpbuf *packet, uint32_t skb_priority,
                                 uint32_t skb_mark, const struct flow_tnl *tnl);
static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
    ovs_assert(dpif->dpif_class->open == dpif_netdev_open);
    return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}

static struct dp_netdev *
get_dp_netdev(const struct dpif *dpif)
{
    return dpif_netdev_cast(dpif)->dp;
}

static int
dpif_netdev_enumerate(struct sset *all_dps)
{
    struct shash_node *node;

    xpthread_mutex_lock(&dp_netdev_mutex);
    SHASH_FOR_EACH(node, &dp_netdevs) {
        sset_add(all_dps, node->name);
    }
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return 0;
}
static bool
dpif_netdev_class_is_dummy(const struct dpif_class *class)
{
    /* Any class other than the standard netdev class is a dummy variant
     * registered by dpif_dummy_register(). */
    return class != &dpif_netdev_class;
}

static const char *
dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
{
    return strcmp(type, "internal") ? type
                  : dpif_netdev_class_is_dummy(class) ? "dummy"
                  : "tap";
}

static struct dpif *
create_dpif_netdev(struct dp_netdev *dp)
{
    uint16_t netflow_id = hash_string(dp->name, 0);
    struct dpif_netdev *dpif;

    dp->open_cnt++;

    dpif = xmalloc(sizeof *dpif);
    dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id);
    dpif->dp = dp;
    dpif->dp_serial = dp->serial;

    return &dpif->dpif;
}
/* Choose an unused, non-zero port number and return it on success.
 * Return ODPP_NONE on failure. */
static odp_port_t
choose_port(struct dp_netdev *dp, const char *name)
{
    uint32_t port_no;

    if (dp->class != &dpif_netdev_class) {
        const char *p;
        int start_no = 0;

        /* If the port name begins with "br", start the number search at
         * 100 to make writing tests easier. */
        if (!strncmp(name, "br", 2)) {
            start_no = 100;
        }

        /* If the port name contains a number, try to assign that port number.
         * This can make writing unit tests easier because port numbers are
         * predictable. */
        for (p = name; *p != '\0'; p++) {
            if (isdigit((unsigned char) *p)) {
                port_no = start_no + strtol(p, NULL, 10);
                if (port_no > 0 && port_no < MAX_PORTS
                    && !dp->ports[port_no]) {
                    return u32_to_odp(port_no);
                }
                break;
            }
        }
    }
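    /* For example, under the heuristics above a dummy datapath port named
     * "eth2" gets port number 2 and one named "br2" gets 100 + 2 = 102,
     * provided those numbers are free; otherwise the scan below picks the
     * lowest free number. */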
    for (port_no = 1; port_no < MAX_PORTS; port_no++) {
        if (!dp->ports[port_no]) {
            return u32_to_odp(port_no);
        }
    }

    return ODPP_NONE;
}
static int
create_dp_netdev(const char *name, const struct dpif_class *class,
                 struct dp_netdev **dpp)
{
    struct dp_netdev *dp;
    int error;
    int i;

    dp = xzalloc(sizeof *dp);
    dp->class = class;
    dp->name = xstrdup(name);
    dp->open_cnt = 0;
    dp->max_mtu = ETH_PAYLOAD_MAX;
    for (i = 0; i < N_QUEUES; i++) {
        dp->queues[i].head = dp->queues[i].tail = 0;
    }
    hmap_init(&dp->flow_table);
    list_init(&dp->port_list);

    error = do_add_port(dp, name, "internal", ODPP_LOCAL);
    if (error) {
        dp_netdev_free(dp);
        return error;
    }

    shash_add(&dp_netdevs, name, dp);

    *dpp = dp;
    return 0;
}
static int
dpif_netdev_open(const struct dpif_class *class, const char *name,
                 bool create, struct dpif **dpifp)
{
    struct dp_netdev *dp;
    int error;

    xpthread_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, name);
    if (!dp) {
        error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
    } else {
        error = (dp->class != class ? EINVAL
                 : create ? EEXIST
                 : 0);
    }
    if (!error) {
        *dpifp = create_dpif_netdev(dp);
    }
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return error;
}
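/* Sketch of a typical call path (assuming the generic dpif layer): a client
 * calls dpif_create_and_open("dp0", "netdev", &dpif), the dpif layer looks
 * up the "netdev" class, and control arrives here with 'create' set,
 * falling back to a plain open if the datapath already exists. */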
static void
dp_netdev_purge_queues(struct dp_netdev *dp)
{
    int i;

    for (i = 0; i < N_QUEUES; i++) {
        struct dp_netdev_queue *q = &dp->queues[i];

        while (q->tail != q->head) {
            struct dp_netdev_upcall *u = &q->upcalls[q->tail++ & QUEUE_MASK];
            ofpbuf_uninit(&u->buf);
        }
    }
}

static void
dp_netdev_free(struct dp_netdev *dp)
{
    struct dp_netdev_port *port, *next;

    dp_netdev_flow_flush(dp);
    LIST_FOR_EACH_SAFE (port, next, node, &dp->port_list) {
        do_del_port(dp, port->port_no);
    }
    dp_netdev_purge_queues(dp);
    hmap_destroy(&dp->flow_table);
    free(dp->name);
    free(dp);
}
static void
dpif_netdev_close(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    xpthread_mutex_lock(&dp_netdev_mutex);

    ovs_assert(dp->open_cnt > 0);
    if (--dp->open_cnt == 0 && dp->destroyed) {
        shash_find_and_delete(&dp_netdevs, dp->name);
        dp_netdev_free(dp);
    }
    free(dpif);

    xpthread_mutex_unlock(&dp_netdev_mutex);
}

static int
dpif_netdev_destroy(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    xpthread_mutex_lock(&dp_netdev_mutex);
    dp->destroyed = true;
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return 0;
}

static int
dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    xpthread_mutex_lock(&dp_netdev_mutex);
    stats->n_flows = hmap_count(&dp->flow_table);
    stats->n_hit = dp->n_hit;
    stats->n_missed = dp->n_missed;
    stats->n_lost = dp->n_lost;
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return 0;
}
static int
do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
            odp_port_t port_no)
{
    struct netdev_saved_flags *sf;
    struct dp_netdev_port *port;
    struct netdev *netdev;
    struct netdev_rx *rx;
    const char *open_type;
    int mtu;
    int error;

    /* XXX reject devices already in some dp_netdev. */

    /* Open and validate network device. */
    open_type = dpif_netdev_port_open_type(dp->class, type);
    error = netdev_open(devname, open_type, &netdev);
    if (error) {
        return error;
    }
    /* XXX reject loopback devices */
    /* XXX reject non-Ethernet devices */

    error = netdev_rx_open(netdev, &rx);
    if (error
        && !(error == EOPNOTSUPP && dpif_netdev_class_is_dummy(dp->class))) {
        VLOG_ERR("%s: cannot receive packets on this network device (%s)",
                 devname, ovs_strerror(error));
        netdev_close(netdev);
        return error;
    }

    error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf);
    if (error) {
        netdev_rx_close(rx);
        netdev_close(netdev);
        return error;
    }

    port = xmalloc(sizeof *port);
    port->port_no = port_no;
    port->netdev = netdev;
    port->sf = sf;
    port->rx = rx;
    port->type = xstrdup(type);

    error = netdev_get_mtu(netdev, &mtu);
    if (!error && mtu > dp->max_mtu) {
        dp->max_mtu = mtu;
    }

    list_push_back(&dp->port_list, &port->node);
    dp->ports[odp_to_u32(port_no)] = port;
    dp->serial++;

    return 0;
}
static int
dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
                     odp_port_t *port_nop)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *dpif_port;
    odp_port_t port_no;
    int error;

    xpthread_mutex_lock(&dp_netdev_mutex);
    dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
    if (*port_nop != ODPP_NONE) {
        uint32_t port_idx = odp_to_u32(*port_nop);
        if (port_idx >= MAX_PORTS) {
            error = EFBIG;
        } else if (dp->ports[port_idx]) {
            error = EBUSY;
        } else {
            error = 0;
            port_no = *port_nop;
        }
    } else {
        port_no = choose_port(dp, dpif_port);
        error = port_no == ODPP_NONE ? EFBIG : 0;
    }
    if (!error) {
        *port_nop = port_no;
        error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
    }
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return error;
}
static int
dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    int error;

    xpthread_mutex_lock(&dp_netdev_mutex);
    /* The local port (ODPP_LOCAL) cannot be removed. */
    error = port_no == ODPP_LOCAL ? EINVAL : do_del_port(dp, port_no);
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return error;
}

static bool
is_valid_port_number(odp_port_t port_no)
{
    return odp_to_u32(port_no) < MAX_PORTS;
}

static int
get_port_by_number(struct dp_netdev *dp,
                   odp_port_t port_no, struct dp_netdev_port **portp)
{
    if (!is_valid_port_number(port_no)) {
        *portp = NULL;
        return EINVAL;
    } else {
        *portp = dp->ports[odp_to_u32(port_no)];
        return *portp ? 0 : ENOENT;
    }
}
static int
get_port_by_name(struct dp_netdev *dp,
                 const char *devname, struct dp_netdev_port **portp)
{
    struct dp_netdev_port *port;

    LIST_FOR_EACH (port, node, &dp->port_list) {
        if (!strcmp(netdev_get_name(port->netdev), devname)) {
            *portp = port;
            return 0;
        }
    }
    return ENOENT;
}

static int
do_del_port(struct dp_netdev *dp, odp_port_t port_no)
{
    struct dp_netdev_port *port;
    int error;

    error = get_port_by_number(dp, port_no, &port);
    if (error) {
        return error;
    }

    list_remove(&port->node);
    dp->ports[odp_to_u32(port_no)] = NULL;
    dp->serial++;

    netdev_close(port->netdev);
    netdev_restore_flags(port->sf);
    netdev_rx_close(port->rx);
    free(port->type);
    free(port);

    return 0;
}
static void
answer_port_query(const struct dp_netdev_port *port,
                  struct dpif_port *dpif_port)
{
    dpif_port->name = xstrdup(netdev_get_name(port->netdev));
    dpif_port->type = xstrdup(port->type);
    dpif_port->port_no = port->port_no;
}

static int
dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
                                 struct dpif_port *dpif_port)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int error;

    xpthread_mutex_lock(&dp_netdev_mutex);
    error = get_port_by_number(dp, port_no, &port);
    if (!error && dpif_port) {
        answer_port_query(port, dpif_port);
    }
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return error;
}

static int
dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
                               struct dpif_port *dpif_port)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_port *port;
    int error;

    xpthread_mutex_lock(&dp_netdev_mutex);
    error = get_port_by_name(dp, devname, &port);
    if (!error && dpif_port) {
        answer_port_query(port, dpif_port);
    }
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return error;
}

static odp_port_t
dpif_netdev_get_max_ports(const struct dpif *dpif OVS_UNUSED)
{
    return u32_to_odp(MAX_PORTS);
}
static void
dp_netdev_free_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow)
{
    hmap_remove(&dp->flow_table, &flow->node);
    free(flow->actions);
    free(flow);
}

static void
dp_netdev_flow_flush(struct dp_netdev *dp)
{
    struct dp_netdev_flow *flow, *next;

    HMAP_FOR_EACH_SAFE (flow, next, node, &dp->flow_table) {
        dp_netdev_free_flow(dp, flow);
    }
}

static int
dpif_netdev_flow_flush(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);

    xpthread_mutex_lock(&dp_netdev_mutex);
    dp_netdev_flow_flush(dp);
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return 0;
}
struct dp_netdev_port_state {
    odp_port_t port_no;
    char *name;
};

static int
dpif_netdev_port_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep)
{
    *statep = xzalloc(sizeof(struct dp_netdev_port_state));
    return 0;
}

static int
dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
                           struct dpif_port *dpif_port)
{
    struct dp_netdev_port_state *state = state_;
    struct dp_netdev *dp = get_dp_netdev(dpif);
    uint32_t port_idx;

    xpthread_mutex_lock(&dp_netdev_mutex);
    for (port_idx = odp_to_u32(state->port_no);
         port_idx < MAX_PORTS; port_idx++) {
        struct dp_netdev_port *port = dp->ports[port_idx];
        if (port) {
            free(state->name);
            state->name = xstrdup(netdev_get_name(port->netdev));
            dpif_port->name = state->name;
            dpif_port->type = port->type;
            dpif_port->port_no = port->port_no;
            state->port_no = u32_to_odp(port_idx + 1);
            xpthread_mutex_unlock(&dp_netdev_mutex);

            return 0;
        }
    }
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return EOF;
}

static int
dpif_netdev_port_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_)
{
    struct dp_netdev_port_state *state = state_;

    free(state->name);
    free(state);
    return 0;
}
static int
dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
{
    struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
    int error;

    xpthread_mutex_lock(&dp_netdev_mutex);
    if (dpif->dp_serial != dpif->dp->serial) {
        dpif->dp_serial = dpif->dp->serial;
        error = ENOBUFS;
    } else {
        error = EAGAIN;
    }
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return error;
}

static void
dpif_netdev_port_poll_wait(const struct dpif *dpif_)
{
    struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);

    /* XXX In a multithreaded process, there is a race window between this
     * function and the poll_block() in one thread and a change in
     * dpif->dp->serial in another thread. */

    xpthread_mutex_lock(&dp_netdev_mutex);
    if (dpif->dp_serial != dpif->dp->serial) {
        poll_immediate_wake();
    }
    xpthread_mutex_unlock(&dp_netdev_mutex);
}
static struct dp_netdev_flow *
dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct flow *key)
{
    struct dp_netdev_flow *flow;

    HMAP_FOR_EACH_WITH_HASH (flow, node, flow_hash(key, 0), &dp->flow_table) {
        if (flow_equal(&flow->key, key)) {
            return flow;
        }
    }
    return NULL;
}
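/* The flow table is strictly exact-match: flow_hash() picks the bucket and
 * flow_equal() compares every field of the extracted flow, so two packets
 * share a flow entry only if their keys are identical.  There is no
 * wildcarding in this datapath; each new header combination takes a miss
 * upcall before a flow can be installed for it. */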
static void
get_dpif_flow_stats(struct dp_netdev_flow *flow, struct dpif_flow_stats *stats)
{
    stats->n_packets = flow->packet_count;
    stats->n_bytes = flow->byte_count;
    stats->used = flow->used;
    stats->tcp_flags = flow->tcp_flags;
}

static int
dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
                              struct flow *flow)
{
    odp_port_t in_port;

    if (odp_flow_key_to_flow(key, key_len, flow) != ODP_FIT_PERFECT) {
        /* This should not happen: it indicates that odp_flow_key_from_flow()
         * and odp_flow_key_to_flow() disagree on the acceptable form of a
         * flow.  Log the problem as an error, with enough details to enable
         * debugging. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

        if (!VLOG_DROP_ERR(&rl)) {
            struct ds s;

            ds_init(&s);
            odp_flow_key_format(key, key_len, &s);
            VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s));
            ds_destroy(&s);
        }

        return EINVAL;
    }

    /* Reject flows whose in_port is neither a valid port number nor
     * ODPP_NONE. */
    in_port = flow->in_port.odp_port;
    if (!is_valid_port_number(in_port) && in_port != ODPP_NONE) {
        return EINVAL;
    }

    return 0;
}
static int
dpif_netdev_flow_get(const struct dpif *dpif,
                     const struct nlattr *nl_key, size_t nl_key_len,
                     struct ofpbuf **actionsp, struct dpif_flow_stats *stats)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *flow;
    struct flow key;
    int error;

    error = dpif_netdev_flow_from_nlattrs(nl_key, nl_key_len, &key);
    if (error) {
        return error;
    }

    xpthread_mutex_lock(&dp_netdev_mutex);
    flow = dp_netdev_lookup_flow(dp, &key);
    if (flow) {
        if (stats) {
            get_dpif_flow_stats(flow, stats);
        }
        if (actionsp) {
            *actionsp = ofpbuf_clone_data(flow->actions, flow->actions_len);
        }
    } else {
        error = ENOENT;
    }
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return error;
}
static int
set_flow_actions(struct dp_netdev_flow *flow,
                 const struct nlattr *actions, size_t actions_len)
{
    flow->actions = xrealloc(flow->actions, actions_len);
    flow->actions_len = actions_len;
    memcpy(flow->actions, actions, actions_len);
    return 0;
}

static int
dp_netdev_flow_add(struct dp_netdev *dp, const struct flow *key,
                   const struct nlattr *actions, size_t actions_len)
{
    struct dp_netdev_flow *flow;
    int error;

    flow = xzalloc(sizeof *flow);
    flow->key = *key;

    error = set_flow_actions(flow, actions, actions_len);
    if (error) {
        free(flow);
        return error;
    }

    hmap_insert(&dp->flow_table, &flow->node, flow_hash(&flow->key, 0));
    return 0;
}

static void
clear_stats(struct dp_netdev_flow *flow)
{
    flow->used = 0;
    flow->packet_count = 0;
    flow->byte_count = 0;
    flow->tcp_flags = 0;
}
static int
dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *flow;
    struct flow key;
    int error;

    error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &key);
    if (error) {
        return error;
    }

    xpthread_mutex_lock(&dp_netdev_mutex);
    flow = dp_netdev_lookup_flow(dp, &key);
    if (!flow) {
        if (put->flags & DPIF_FP_CREATE) {
            if (hmap_count(&dp->flow_table) < MAX_FLOWS) {
                if (put->stats) {
                    memset(put->stats, 0, sizeof *put->stats);
                }
                error = dp_netdev_flow_add(dp, &key, put->actions,
                                           put->actions_len);
            } else {
                error = EFBIG;
            }
        } else {
            error = ENOENT;
        }
    } else {
        if (put->flags & DPIF_FP_MODIFY) {
            error = set_flow_actions(flow, put->actions, put->actions_len);
            if (!error) {
                if (put->stats) {
                    get_dpif_flow_stats(flow, put->stats);
                }
                if (put->flags & DPIF_FP_ZERO_STATS) {
                    clear_stats(flow);
                }
            }
        } else {
            error = EEXIST;
        }
    }
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return error;
}
static int
dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *flow;
    struct flow key;
    int error;

    error = dpif_netdev_flow_from_nlattrs(del->key, del->key_len, &key);
    if (error) {
        return error;
    }

    xpthread_mutex_lock(&dp_netdev_mutex);
    flow = dp_netdev_lookup_flow(dp, &key);
    if (flow) {
        if (del->stats) {
            get_dpif_flow_stats(flow, del->stats);
        }
        dp_netdev_free_flow(dp, flow);
    } else {
        error = ENOENT;
    }
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return error;
}
struct dp_netdev_flow_state {
    uint32_t bucket;
    uint32_t offset;
    struct nlattr *actions;
    struct odputil_keybuf keybuf;
    struct dpif_flow_stats stats;
};

static int
dpif_netdev_flow_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep)
{
    struct dp_netdev_flow_state *state;

    *statep = state = xmalloc(sizeof *state);
    state->bucket = 0;
    state->offset = 0;
    state->actions = NULL;
    return 0;
}
static int
dpif_netdev_flow_dump_next(const struct dpif *dpif, void *state_,
                           const struct nlattr **key, size_t *key_len,
                           const struct nlattr **mask, size_t *mask_len,
                           const struct nlattr **actions, size_t *actions_len,
                           const struct dpif_flow_stats **stats)
{
    struct dp_netdev_flow_state *state = state_;
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct dp_netdev_flow *flow;
    struct hmap_node *node;

    xpthread_mutex_lock(&dp_netdev_mutex);
    node = hmap_at_position(&dp->flow_table, &state->bucket, &state->offset);
    if (!node) {
        xpthread_mutex_unlock(&dp_netdev_mutex);
        return EOF;
    }

    flow = CONTAINER_OF(node, struct dp_netdev_flow, node);

    if (key) {
        struct ofpbuf buf;

        ofpbuf_use_stack(&buf, &state->keybuf, sizeof state->keybuf);
        odp_flow_key_from_flow(&buf, &flow->key, flow->key.in_port.odp_port);

        *key = buf.data;
        *key_len = buf.size;
    }

    if (mask) {
        *mask = NULL;
        *mask_len = 0;
    }

    if (actions) {
        free(state->actions);
        state->actions = xmemdup(flow->actions, flow->actions_len);

        *actions = state->actions;
        *actions_len = flow->actions_len;
    }

    if (stats) {
        get_dpif_flow_stats(flow, &state->stats);
        *stats = &state->stats;
    }

    xpthread_mutex_unlock(&dp_netdev_mutex);
    return 0;
}

static int
dpif_netdev_flow_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_)
{
    struct dp_netdev_flow_state *state = state_;

    free(state->actions);
    free(state);
    return 0;
}
static int
dpif_netdev_execute(struct dpif *dpif, const struct dpif_execute *execute)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    struct ofpbuf copy;
    struct flow key;
    int error;

    if (execute->packet->size < ETH_HEADER_LEN ||
        execute->packet->size > UINT16_MAX) {
        return EINVAL;
    }

    /* Make a deep copy of 'packet', because we might modify its data. */
    ofpbuf_init(&copy, DP_NETDEV_HEADROOM + execute->packet->size);
    ofpbuf_reserve(&copy, DP_NETDEV_HEADROOM);
    ofpbuf_put(&copy, execute->packet->data, execute->packet->size);

    flow_extract(&copy, 0, 0, NULL, NULL, &key);
    error = dpif_netdev_flow_from_nlattrs(execute->key, execute->key_len,
                                          &key);
    if (!error) {
        xpthread_mutex_lock(&dp_netdev_mutex);
        dp_netdev_execute_actions(dp, &copy, &key,
                                  execute->actions, execute->actions_len);
        xpthread_mutex_unlock(&dp_netdev_mutex);
    }

    ofpbuf_uninit(&copy);
    return error;
}
static int
dpif_netdev_recv_set(struct dpif *dpif OVS_UNUSED, bool enable OVS_UNUSED)
{
    return 0;
}

static int
dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED,
                              uint32_t queue_id, uint32_t *priority)
{
    /* There are no real QoS queues here, so queue IDs map straight through
     * to priorities. */
    *priority = queue_id;
    return 0;
}

static struct dp_netdev_queue *
find_nonempty_queue(struct dpif *dpif)
{
    struct dp_netdev *dp = get_dp_netdev(dpif);
    int i;

    for (i = 0; i < N_QUEUES; i++) {
        struct dp_netdev_queue *q = &dp->queues[i];
        if (q->head != q->tail) {
            return q;
        }
    }
    return NULL;
}
static int
dpif_netdev_recv(struct dpif *dpif, struct dpif_upcall *upcall,
                 struct ofpbuf *buf)
{
    struct dp_netdev_queue *q;
    int error;

    xpthread_mutex_lock(&dp_netdev_mutex);
    q = find_nonempty_queue(dpif);
    if (q) {
        struct dp_netdev_upcall *u = &q->upcalls[q->tail++ & QUEUE_MASK];

        *upcall = u->upcall;
        upcall->packet = buf;

        /* Hand the queued buffer itself to the caller, replacing whatever
         * 'buf' held before. */
        ofpbuf_uninit(buf);
        *buf = u->buf;

        error = 0;
    } else {
        error = EAGAIN;
    }
    xpthread_mutex_unlock(&dp_netdev_mutex);

    return error;
}
static void
dpif_netdev_recv_wait(struct dpif *dpif)
{
    /* XXX In a multithreaded process, there is a race window between this
     * function and the poll_block() in one thread and a packet being queued
     * in another thread. */

    xpthread_mutex_lock(&dp_netdev_mutex);
    if (find_nonempty_queue(dpif)) {
        poll_immediate_wake();
    }
    xpthread_mutex_unlock(&dp_netdev_mutex);
}

static void
dpif_netdev_recv_purge(struct dpif *dpif)
{
    struct dpif_netdev *dpif_netdev = dpif_netdev_cast(dpif);

    xpthread_mutex_lock(&dp_netdev_mutex);
    dp_netdev_purge_queues(dpif_netdev->dp);
    xpthread_mutex_unlock(&dp_netdev_mutex);
}
static void
dp_netdev_flow_used(struct dp_netdev_flow *flow, const struct ofpbuf *packet)
{
    flow->used = time_msec();
    flow->packet_count++;
    flow->byte_count += packet->size;
    flow->tcp_flags |= packet_get_tcp_flags(packet, &flow->key);
}
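/* Per-packet entry point: dp_netdev_port_input() below extracts a flow key
 * from the received packet and takes one of two paths.  On a hit, the flow's
 * cached actions run directly against the packet (the fast path); on a miss,
 * the packet is copied into an upcall queue and handed to the client through
 * dpif_netdev_recv() as a DPIF_UC_MISS upcall (the slow path). */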
static void
dp_netdev_port_input(struct dp_netdev *dp, struct dp_netdev_port *port,
                     struct ofpbuf *packet, uint32_t skb_priority,
                     uint32_t skb_mark, const struct flow_tnl *tnl)
{
    struct dp_netdev_flow *flow;
    struct flow key;
    union flow_in_port in_port_;

    if (packet->size < ETH_HEADER_LEN) {
        return;
    }
    in_port_.odp_port = port->port_no;
    flow_extract(packet, skb_priority, skb_mark, tnl, &in_port_, &key);
    flow = dp_netdev_lookup_flow(dp, &key);
    if (flow) {
        dp_netdev_flow_used(flow, packet);
        dp_netdev_execute_actions(dp, packet, &key,
                                  flow->actions, flow->actions_len);
        dp->n_hit++;
    } else {
        dp->n_missed++;
        dp_netdev_output_userspace(dp, packet, DPIF_UC_MISS, &key, NULL);
    }
}
static void
dpif_netdev_run(struct dpif *dpif)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp;
    struct ofpbuf packet;

    xpthread_mutex_lock(&dp_netdev_mutex);
    dp = get_dp_netdev(dpif);
    ofpbuf_init(&packet,
                DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + dp->max_mtu);

    LIST_FOR_EACH (port, node, &dp->port_list) {
        int error;

        /* Reset packet contents. */
        ofpbuf_clear(&packet);
        ofpbuf_reserve(&packet, DP_NETDEV_HEADROOM);

        error = port->rx ? netdev_rx_recv(port->rx, &packet) : EOPNOTSUPP;
        if (!error) {
            dp_netdev_port_input(dp, port, &packet, 0, 0, NULL);
        } else if (error != EAGAIN && error != EOPNOTSUPP) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

            VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
                        netdev_get_name(port->netdev), ovs_strerror(error));
        }
    }

    ofpbuf_uninit(&packet);
    xpthread_mutex_unlock(&dp_netdev_mutex);
}
static void
dpif_netdev_wait(struct dpif *dpif)
{
    struct dp_netdev_port *port;

    /* There is a race here, if thread A calls dpif_netdev_wait(dpif) and
     * thread B calls dpif_port_add(dpif) or dpif_port_remove(dpif) before
     * A makes it to poll_block().
     *
     * But I think it doesn't matter:
     *
     *     - In the dpif_port_add() case, A will not wake up when a packet
     *       arrives on the new port, but this would also happen if the
     *       ordering were reversed.
     *
     *     - In the dpif_port_remove() case, A might wake up spuriously, but
     *       that is harmless. */

    xpthread_mutex_lock(&dp_netdev_mutex);
    LIST_FOR_EACH (port, node, &get_dp_netdev(dpif)->port_list) {
        if (port->rx) {
            netdev_rx_wait(port->rx);
        }
    }
    xpthread_mutex_unlock(&dp_netdev_mutex);
}
static void
dp_netdev_output_port(void *dp_, struct ofpbuf *packet, uint32_t out_port)
{
    struct dp_netdev *dp = dp_;
    struct dp_netdev_port *p = dp->ports[out_port];
    if (p) {
        netdev_send(p->netdev, packet);
    }
}
static int
dp_netdev_output_userspace(struct dp_netdev *dp, const struct ofpbuf *packet,
                           int queue_no, const struct flow *flow,
                           const struct nlattr *userdata)
{
    struct dp_netdev_queue *q = &dp->queues[queue_no];
    if (q->head - q->tail < MAX_QUEUE_LEN) {
        struct dp_netdev_upcall *u = &q->upcalls[q->head++ & QUEUE_MASK];
        struct dpif_upcall *upcall = &u->upcall;
        struct ofpbuf *buf = &u->buf;
        size_t buf_size;

        upcall->type = queue_no;

        /* Allocate buffer big enough for everything. */
        buf_size = ODPUTIL_FLOW_KEY_BYTES + 2 + packet->size;
        if (userdata) {
            buf_size += NLA_ALIGN(userdata->nla_len);
        }
        ofpbuf_init(buf, buf_size);

        /* Put ODP flow. */
        odp_flow_key_from_flow(buf, flow, flow->in_port.odp_port);
        upcall->key = buf->data;
        upcall->key_len = buf->size;

        /* Put userdata. */
        if (userdata) {
            upcall->userdata = ofpbuf_put(buf, userdata,
                                          NLA_ALIGN(userdata->nla_len));
        }
        /* Put packet.
         *
         * We adjust 'data' and 'size' in 'buf' so that only the packet itself
         * is visible in 'upcall->packet'.  The ODP flow and (if present)
         * userdata become part of the headroom. */
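        /* Resulting 'buf' layout (sketch):
         *
         *    +--------------+-------------------+--------+--------------+
         *    | ODP flow key | userdata (if any) | 2 0s   | packet data  |
         *    +--------------+-------------------+--------+--------------+
         *                                                ^ buf->data
         *
         * The 2 zero bytes appear to serve the same purpose as the 2-byte
         * pad in DP_NETDEV_HEADROOM: keeping the copied packet's IP header
         * on a 4-byte boundary. */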
        ofpbuf_put_zeros(buf, 2);
        buf->data = ofpbuf_put(buf, packet->data, packet->size);
        buf->size = packet->size;
        upcall->packet = buf;

        return 0;
    } else {
        dp->n_lost++;
        return ENOBUFS;
    }
}
static void
dp_netdev_action_userspace(void *dp, struct ofpbuf *packet,
                           const struct flow *key,
                           const struct nlattr *userdata)
{
    dp_netdev_output_userspace(dp, packet, DPIF_UC_ACTION, key, userdata);
}

static void
dp_netdev_execute_actions(struct dp_netdev *dp,
                          struct ofpbuf *packet, struct flow *key,
                          const struct nlattr *actions,
                          size_t actions_len)
{
    odp_execute_actions(dp, packet, key, actions, actions_len,
                        dp_netdev_output_port, dp_netdev_action_userspace);
}
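/* Interpreting the Netlink-encoded action list is delegated to the shared
 * odp-execute code: odp_execute_actions() applies header-modifying actions
 * to 'packet' itself and calls back into dp_netdev_output_port() for output
 * actions and dp_netdev_action_userspace() for userspace actions. */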
const struct dpif_class dpif_netdev_class = {
    "netdev",
    dpif_netdev_enumerate,
    dpif_netdev_port_open_type,
    dpif_netdev_open,
    dpif_netdev_close,
    dpif_netdev_destroy,
    dpif_netdev_run,
    dpif_netdev_wait,
    dpif_netdev_get_stats,
    dpif_netdev_port_add,
    dpif_netdev_port_del,
    dpif_netdev_port_query_by_number,
    dpif_netdev_port_query_by_name,
    dpif_netdev_get_max_ports,
    NULL,                       /* port_get_pid */
    dpif_netdev_port_dump_start,
    dpif_netdev_port_dump_next,
    dpif_netdev_port_dump_done,
    dpif_netdev_port_poll,
    dpif_netdev_port_poll_wait,
    dpif_netdev_flow_get,
    dpif_netdev_flow_put,
    dpif_netdev_flow_del,
    dpif_netdev_flow_flush,
    dpif_netdev_flow_dump_start,
    dpif_netdev_flow_dump_next,
    dpif_netdev_flow_dump_done,
    dpif_netdev_execute,
    NULL,                       /* operate */
    dpif_netdev_recv_set,
    dpif_netdev_queue_to_priority,
    dpif_netdev_recv,
    dpif_netdev_recv_wait,
    dpif_netdev_recv_purge,
};
static void
dpif_dummy_register__(const char *type)
{
    struct dpif_class *class;

    class = xmalloc(sizeof *class);
    *class = dpif_netdev_class;
    class->type = xstrdup(type);
    dp_register_provider(class);
}

void
dpif_dummy_register(bool override)
{
    if (override) {
        struct sset types;
        const char *type;

        sset_init(&types);
        dp_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            if (!dp_unregister_provider(type)) {
                dpif_dummy_register__(type);
            }
        }
        sset_destroy(&types);
    }

    dpif_dummy_register__("dummy");
}