2 * Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include "netdev-vport.h"
23 #include <sys/socket.h>
25 #include <sys/ioctl.h>
27 #include "byte-order.h"
34 #include "netdev-provider.h"
37 #include "poll-loop.h"
38 #include "route-table.h"
40 #include "socket-util.h"
VLOG_DEFINE_THIS_MODULE(netdev_vport);

/* IANA-assigned default UDP destination ports, used when the user does not
 * configure "dst_port" explicitly: 4789 for VXLAN and 4341 for LISP data
 * packets.  See set_tunnel_config() and get_tunnel_config(). */
#define VXLAN_DST_PORT 4789
#define LISP_DST_PORT 4341

/* IP TTL applied to tunnel packets when "ttl" is not configured. */
#define DEFAULT_TTL 64
/* Protects all members below. */
struct ovs_mutex mutex;

/* Ethernet address reported for this vport; randomized at construction
 * time by netdev_vport_construct(). */
uint8_t etheraddr[ETH_ADDR_LEN];

/* Counters updated by netdev_vport_inc_rx() and netdev_vport_inc_tx(). */
struct netdev_stats stats;

/* Tunnel parameters parsed from the database options by
 * set_tunnel_config(). */
struct netdev_tunnel_config tnl_cfg;

/* Name of the interface that tunnel traffic currently egresses through,
 * per the system routing table; maintained by
 * tunnel_check_status_change__(). */
char egress_iface[IFNAMSIZ];

/* NOTE(review): the two members below appear to belong to a different
 * definition ("struct vport_class", per vport_class_cast()) whose opening
 * line is not visible in this chunk — confirm against the full file. */

/* Datapath port name for this class, e.g. "gre_system", or NULL. */
const char *dpif_port;

/* The netdev_class embedded in the vport class (see CONTAINER_OF use in
 * vport_class_cast()). */
struct netdev_class netdev_class;
/* Last read of the route-table's change number.  Compared against
 * route_table_get_change_seq() in netdev_vport_run()/netdev_vport_wait()
 * to detect routing changes. */
static uint64_t rt_change_seqno;

/* Forward declarations for functions referenced before their definitions. */
static int netdev_vport_construct(struct netdev *);
static int get_patch_config(const struct netdev *netdev, struct smap *args);
static int get_tunnel_config(const struct netdev *, struct smap *args);
static bool tunnel_check_status_change__(struct netdev_vport *);
82 is_vport_class(const struct netdev_class *class)
84 return class->construct == netdev_vport_construct;
/* Public wrapper around is_vport_class() for use by other modules. */
bool
netdev_vport_is_vport_class(const struct netdev_class *class)
{
    return is_vport_class(class);
}
93 static const struct vport_class *
94 vport_class_cast(const struct netdev_class *class)
96 ovs_assert(is_vport_class(class));
97 return CONTAINER_OF(class, struct vport_class, netdev_class);
100 static struct netdev_vport *
101 netdev_vport_cast(const struct netdev *netdev)
103 ovs_assert(is_vport_class(netdev_get_class(netdev)));
104 return CONTAINER_OF(netdev, struct netdev_vport, up);
107 static const struct netdev_tunnel_config *
108 get_netdev_tunnel_config(const struct netdev *netdev)
110 return &netdev_vport_cast(netdev)->tnl_cfg;
114 netdev_vport_is_patch(const struct netdev *netdev)
116 const struct netdev_class *class = netdev_get_class(netdev);
118 return class->get_config == get_patch_config;
/* Returns true if 'dev' is a layer-3 port (one that carries packets without
 * an Ethernet header).  Currently only "lisp" qualifies. */
bool
netdev_vport_is_layer3(const struct netdev *dev)
{
    const char *type = netdev_get_type(dev);

    return !strcmp("lisp", type);
}
130 netdev_vport_needs_dst_port(const struct netdev *dev)
132 const struct netdev_class *class = netdev_get_class(dev);
133 const char *type = netdev_get_type(dev);
135 return (class->get_config == get_tunnel_config &&
136 (!strcmp("vxlan", type) || !strcmp("lisp", type)));
140 netdev_vport_class_get_dpif_port(const struct netdev_class *class)
142 return is_vport_class(class) ? vport_class_cast(class)->dpif_port : NULL;
146 netdev_vport_get_dpif_port(const struct netdev *netdev,
147 char namebuf[], size_t bufsize)
149 if (netdev_vport_needs_dst_port(netdev)) {
150 const struct netdev_vport *vport = netdev_vport_cast(netdev);
151 const char *type = netdev_get_type(netdev);
154 * Note: IFNAMSIZ is 16 bytes long. The maximum length of a VXLAN
155 * or LISP port name below is 15 or 14 bytes respectively. Still,
156 * assert here on the size of strlen(type) in case that changes
159 BUILD_ASSERT(NETDEV_VPORT_NAME_BUFSIZE >= IFNAMSIZ);
160 ovs_assert(strlen(type) + 10 < IFNAMSIZ);
161 snprintf(namebuf, bufsize, "%s_sys_%d", type,
162 ntohs(vport->tnl_cfg.dst_port));
165 const struct netdev_class *class = netdev_get_class(netdev);
166 const char *dpif_port = netdev_vport_class_get_dpif_port(class);
167 return dpif_port ? dpif_port : netdev_get_name(netdev);
172 netdev_vport_get_dpif_port_strdup(const struct netdev *netdev)
174 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
176 return xstrdup(netdev_vport_get_dpif_port(netdev, namebuf,
180 /* Whenever the route-table change number is incremented,
181 * netdev_vport_route_changed() should be called to update
182 * the corresponding tunnel interface status. */
184 netdev_vport_route_changed(void)
186 struct netdev **vports;
189 vports = netdev_get_vports(&n_vports);
190 for (i = 0; i < n_vports; i++) {
191 struct netdev *netdev_ = vports[i];
192 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
194 ovs_mutex_lock(&netdev->mutex);
195 /* Finds all tunnel vports. */
196 if (netdev->tnl_cfg.ip_dst) {
197 if (tunnel_check_status_change__(netdev)) {
198 netdev_change_seq_changed(netdev_);
201 netdev_close(netdev_);
202 ovs_mutex_unlock(&netdev->mutex);
208 static struct netdev *
209 netdev_vport_alloc(void)
211 struct netdev_vport *netdev = xzalloc(sizeof *netdev);
216 netdev_vport_construct(struct netdev *netdev_)
218 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
220 ovs_mutex_init(&netdev->mutex);
221 eth_addr_random(netdev->etheraddr);
223 route_table_register();
229 netdev_vport_destruct(struct netdev *netdev_)
231 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
233 route_table_unregister();
235 ovs_mutex_destroy(&netdev->mutex);
/* Frees the storage allocated by netdev_vport_alloc(). */
static void
netdev_vport_dealloc(struct netdev *netdev_)
{
    struct netdev_vport *netdev = netdev_vport_cast(netdev_);

    free(netdev);
}
246 netdev_vport_set_etheraddr(struct netdev *netdev_,
247 const uint8_t mac[ETH_ADDR_LEN])
249 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
251 ovs_mutex_lock(&netdev->mutex);
252 memcpy(netdev->etheraddr, mac, ETH_ADDR_LEN);
253 ovs_mutex_unlock(&netdev->mutex);
254 netdev_change_seq_changed(netdev_);
260 netdev_vport_get_etheraddr(const struct netdev *netdev_,
261 uint8_t mac[ETH_ADDR_LEN])
263 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
265 ovs_mutex_lock(&netdev->mutex);
266 memcpy(mac, netdev->etheraddr, ETH_ADDR_LEN);
267 ovs_mutex_unlock(&netdev->mutex);
272 /* Checks if the tunnel status has changed and returns a boolean.
273 * Updates the tunnel status if it has changed. */
275 tunnel_check_status_change__(struct netdev_vport *netdev)
276 OVS_REQUIRES(netdev->mutex)
278 char iface[IFNAMSIZ];
283 route = netdev->tnl_cfg.ip_dst;
284 if (route_table_get_name(route, iface)) {
285 struct netdev *egress_netdev;
287 if (!netdev_open(iface, "system", &egress_netdev)) {
288 status = netdev_get_carrier(egress_netdev);
289 netdev_close(egress_netdev);
293 if (strcmp(netdev->egress_iface, iface)
294 || netdev->carrier_status != status) {
295 ovs_strlcpy(netdev->egress_iface, iface, IFNAMSIZ);
296 netdev->carrier_status = status;
305 tunnel_get_status(const struct netdev *netdev_, struct smap *smap)
307 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
309 if (netdev->egress_iface[0]) {
310 smap_add(smap, "tunnel_egress_iface", netdev->egress_iface);
312 smap_add(smap, "tunnel_egress_iface_carrier",
313 netdev->carrier_status ? "up" : "down");
320 netdev_vport_update_flags(struct netdev *netdev OVS_UNUSED,
321 enum netdev_flags off,
322 enum netdev_flags on OVS_UNUSED,
323 enum netdev_flags *old_flagsp)
325 if (off & (NETDEV_UP | NETDEV_PROMISC)) {
329 *old_flagsp = NETDEV_UP | NETDEV_PROMISC;
334 netdev_vport_run(void)
339 seq = route_table_get_change_seq();
340 if (rt_change_seqno != seq) {
341 rt_change_seqno = seq;
342 netdev_vport_route_changed();
347 netdev_vport_wait(void)
352 seq = route_table_get_change_seq();
353 if (rt_change_seqno != seq) {
354 poll_immediate_wake();
358 /* Code specific to tunnel types. */
361 parse_key(const struct smap *args, const char *name,
362 bool *present, bool *flow)
369 s = smap_get(args, name);
371 s = smap_get(args, "key");
379 if (!strcmp(s, "flow")) {
383 return htonll(strtoull(s, NULL, 0));
388 set_tunnel_config(struct netdev *dev_, const struct smap *args)
390 struct netdev_vport *dev = netdev_vport_cast(dev_);
391 const char *name = netdev_get_name(dev_);
392 const char *type = netdev_get_type(dev_);
393 bool ipsec_mech_set, needs_dst_port, has_csum;
394 struct netdev_tunnel_config tnl_cfg;
395 struct smap_node *node;
397 has_csum = strstr(type, "gre");
398 ipsec_mech_set = false;
399 memset(&tnl_cfg, 0, sizeof tnl_cfg);
401 needs_dst_port = netdev_vport_needs_dst_port(dev_);
402 tnl_cfg.ipsec = strstr(type, "ipsec");
403 tnl_cfg.dont_fragment = true;
405 SMAP_FOR_EACH (node, args) {
406 if (!strcmp(node->key, "remote_ip")) {
407 struct in_addr in_addr;
408 if (!strcmp(node->value, "flow")) {
409 tnl_cfg.ip_dst_flow = true;
410 tnl_cfg.ip_dst = htonl(0);
411 } else if (lookup_ip(node->value, &in_addr)) {
412 VLOG_WARN("%s: bad %s 'remote_ip'", name, type);
413 } else if (ip_is_multicast(in_addr.s_addr)) {
414 VLOG_WARN("%s: multicast remote_ip="IP_FMT" not allowed",
415 name, IP_ARGS(in_addr.s_addr));
418 tnl_cfg.ip_dst = in_addr.s_addr;
420 } else if (!strcmp(node->key, "local_ip")) {
421 struct in_addr in_addr;
422 if (!strcmp(node->value, "flow")) {
423 tnl_cfg.ip_src_flow = true;
424 tnl_cfg.ip_src = htonl(0);
425 } else if (lookup_ip(node->value, &in_addr)) {
426 VLOG_WARN("%s: bad %s 'local_ip'", name, type);
428 tnl_cfg.ip_src = in_addr.s_addr;
430 } else if (!strcmp(node->key, "tos")) {
431 if (!strcmp(node->value, "inherit")) {
432 tnl_cfg.tos_inherit = true;
436 tos = strtol(node->value, &endptr, 0);
437 if (*endptr == '\0' && tos == (tos & IP_DSCP_MASK)) {
440 VLOG_WARN("%s: invalid TOS %s", name, node->value);
443 } else if (!strcmp(node->key, "ttl")) {
444 if (!strcmp(node->value, "inherit")) {
445 tnl_cfg.ttl_inherit = true;
447 tnl_cfg.ttl = atoi(node->value);
449 } else if (!strcmp(node->key, "dst_port") && needs_dst_port) {
450 tnl_cfg.dst_port = htons(atoi(node->value));
451 } else if (!strcmp(node->key, "csum") && has_csum) {
452 if (!strcmp(node->value, "true")) {
455 } else if (!strcmp(node->key, "df_default")) {
456 if (!strcmp(node->value, "false")) {
457 tnl_cfg.dont_fragment = false;
459 } else if (!strcmp(node->key, "peer_cert") && tnl_cfg.ipsec) {
460 if (smap_get(args, "certificate")) {
461 ipsec_mech_set = true;
463 const char *use_ssl_cert;
465 /* If the "use_ssl_cert" is true, then "certificate" and
466 * "private_key" will be pulled from the SSL table. The
467 * use of this option is strongly discouraged, since it
468 * will like be removed when multiple SSL configurations
469 * are supported by OVS.
471 use_ssl_cert = smap_get(args, "use_ssl_cert");
472 if (!use_ssl_cert || strcmp(use_ssl_cert, "true")) {
473 VLOG_ERR("%s: 'peer_cert' requires 'certificate' argument",
477 ipsec_mech_set = true;
479 } else if (!strcmp(node->key, "psk") && tnl_cfg.ipsec) {
480 ipsec_mech_set = true;
481 } else if (tnl_cfg.ipsec
482 && (!strcmp(node->key, "certificate")
483 || !strcmp(node->key, "private_key")
484 || !strcmp(node->key, "use_ssl_cert"))) {
485 /* Ignore options not used by the netdev. */
486 } else if (!strcmp(node->key, "key") ||
487 !strcmp(node->key, "in_key") ||
488 !strcmp(node->key, "out_key")) {
489 /* Handled separately below. */
491 VLOG_WARN("%s: unknown %s argument '%s'", name, type, node->key);
495 /* Add a default destination port for VXLAN if none specified. */
496 if (!strcmp(type, "vxlan") && !tnl_cfg.dst_port) {
497 tnl_cfg.dst_port = htons(VXLAN_DST_PORT);
500 /* Add a default destination port for LISP if none specified. */
501 if (!strcmp(type, "lisp") && !tnl_cfg.dst_port) {
502 tnl_cfg.dst_port = htons(LISP_DST_PORT);
506 static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
507 static pid_t pid = 0;
510 ovs_mutex_lock(&mutex);
512 char *file_name = xasprintf("%s/%s", ovs_rundir(),
513 "ovs-monitor-ipsec.pid");
514 pid = read_pidfile(file_name);
517 ovs_mutex_unlock(&mutex);
521 VLOG_ERR("%s: IPsec requires the ovs-monitor-ipsec daemon",
526 if (smap_get(args, "peer_cert") && smap_get(args, "psk")) {
527 VLOG_ERR("%s: cannot define both 'peer_cert' and 'psk'", name);
531 if (!ipsec_mech_set) {
532 VLOG_ERR("%s: IPsec requires an 'peer_cert' or psk' argument",
538 if (!tnl_cfg.ip_dst && !tnl_cfg.ip_dst_flow) {
539 VLOG_ERR("%s: %s type requires valid 'remote_ip' argument",
543 if (tnl_cfg.ip_src_flow && !tnl_cfg.ip_dst_flow) {
544 VLOG_ERR("%s: %s type requires 'remote_ip=flow' with 'local_ip=flow'",
549 tnl_cfg.ttl = DEFAULT_TTL;
552 tnl_cfg.in_key = parse_key(args, "in_key",
553 &tnl_cfg.in_key_present,
554 &tnl_cfg.in_key_flow);
556 tnl_cfg.out_key = parse_key(args, "out_key",
557 &tnl_cfg.out_key_present,
558 &tnl_cfg.out_key_flow);
560 ovs_mutex_lock(&dev->mutex);
561 dev->tnl_cfg = tnl_cfg;
562 tunnel_check_status_change__(dev);
563 netdev_change_seq_changed(dev_);
564 ovs_mutex_unlock(&dev->mutex);
570 get_tunnel_config(const struct netdev *dev, struct smap *args)
572 struct netdev_vport *netdev = netdev_vport_cast(dev);
573 struct netdev_tunnel_config tnl_cfg;
575 ovs_mutex_lock(&netdev->mutex);
576 tnl_cfg = netdev->tnl_cfg;
577 ovs_mutex_unlock(&netdev->mutex);
579 if (tnl_cfg.ip_dst) {
580 smap_add_format(args, "remote_ip", IP_FMT, IP_ARGS(tnl_cfg.ip_dst));
581 } else if (tnl_cfg.ip_dst_flow) {
582 smap_add(args, "remote_ip", "flow");
585 if (tnl_cfg.ip_src) {
586 smap_add_format(args, "local_ip", IP_FMT, IP_ARGS(tnl_cfg.ip_src));
587 } else if (tnl_cfg.ip_src_flow) {
588 smap_add(args, "local_ip", "flow");
591 if (tnl_cfg.in_key_flow && tnl_cfg.out_key_flow) {
592 smap_add(args, "key", "flow");
593 } else if (tnl_cfg.in_key_present && tnl_cfg.out_key_present
594 && tnl_cfg.in_key == tnl_cfg.out_key) {
595 smap_add_format(args, "key", "%"PRIu64, ntohll(tnl_cfg.in_key));
597 if (tnl_cfg.in_key_flow) {
598 smap_add(args, "in_key", "flow");
599 } else if (tnl_cfg.in_key_present) {
600 smap_add_format(args, "in_key", "%"PRIu64,
601 ntohll(tnl_cfg.in_key));
604 if (tnl_cfg.out_key_flow) {
605 smap_add(args, "out_key", "flow");
606 } else if (tnl_cfg.out_key_present) {
607 smap_add_format(args, "out_key", "%"PRIu64,
608 ntohll(tnl_cfg.out_key));
612 if (tnl_cfg.ttl_inherit) {
613 smap_add(args, "ttl", "inherit");
614 } else if (tnl_cfg.ttl != DEFAULT_TTL) {
615 smap_add_format(args, "ttl", "%"PRIu8, tnl_cfg.ttl);
618 if (tnl_cfg.tos_inherit) {
619 smap_add(args, "tos", "inherit");
620 } else if (tnl_cfg.tos) {
621 smap_add_format(args, "tos", "0x%x", tnl_cfg.tos);
624 if (tnl_cfg.dst_port) {
625 uint16_t dst_port = ntohs(tnl_cfg.dst_port);
626 const char *type = netdev_get_type(dev);
628 if ((!strcmp("vxlan", type) && dst_port != VXLAN_DST_PORT) ||
629 (!strcmp("lisp", type) && dst_port != LISP_DST_PORT)) {
630 smap_add_format(args, "dst_port", "%d", dst_port);
635 smap_add(args, "csum", "true");
638 if (!tnl_cfg.dont_fragment) {
639 smap_add(args, "df_default", "false");
645 /* Code specific to patch ports. */
647 /* If 'netdev' is a patch port, returns the name of its peer as a malloc()'d
648 * string that the caller must free.
650 * If 'netdev' is not a patch port, returns NULL. */
652 netdev_vport_patch_peer(const struct netdev *netdev_)
656 if (netdev_vport_is_patch(netdev_)) {
657 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
659 ovs_mutex_lock(&netdev->mutex);
661 peer = xstrdup(netdev->peer);
663 ovs_mutex_unlock(&netdev->mutex);
670 netdev_vport_inc_rx(const struct netdev *netdev,
671 const struct dpif_flow_stats *stats)
673 if (is_vport_class(netdev_get_class(netdev))) {
674 struct netdev_vport *dev = netdev_vport_cast(netdev);
676 ovs_mutex_lock(&dev->mutex);
677 dev->stats.rx_packets += stats->n_packets;
678 dev->stats.rx_bytes += stats->n_bytes;
679 ovs_mutex_unlock(&dev->mutex);
684 netdev_vport_inc_tx(const struct netdev *netdev,
685 const struct dpif_flow_stats *stats)
687 if (is_vport_class(netdev_get_class(netdev))) {
688 struct netdev_vport *dev = netdev_vport_cast(netdev);
690 ovs_mutex_lock(&dev->mutex);
691 dev->stats.tx_packets += stats->n_packets;
692 dev->stats.tx_bytes += stats->n_bytes;
693 ovs_mutex_unlock(&dev->mutex);
698 get_patch_config(const struct netdev *dev_, struct smap *args)
700 struct netdev_vport *dev = netdev_vport_cast(dev_);
702 ovs_mutex_lock(&dev->mutex);
704 smap_add(args, "peer", dev->peer);
706 ovs_mutex_unlock(&dev->mutex);
712 set_patch_config(struct netdev *dev_, const struct smap *args)
714 struct netdev_vport *dev = netdev_vport_cast(dev_);
715 const char *name = netdev_get_name(dev_);
718 peer = smap_get(args, "peer");
720 VLOG_ERR("%s: patch type requires valid 'peer' argument", name);
724 if (smap_count(args) > 1) {
725 VLOG_ERR("%s: patch type takes only a 'peer' argument", name);
729 if (!strcmp(name, peer)) {
730 VLOG_ERR("%s: patch peer must not be self", name);
734 ovs_mutex_lock(&dev->mutex);
736 dev->peer = xstrdup(peer);
737 netdev_change_seq_changed(dev_);
738 ovs_mutex_unlock(&dev->mutex);
744 get_stats(const struct netdev *netdev, struct netdev_stats *stats)
746 struct netdev_vport *dev = netdev_vport_cast(netdev);
748 ovs_mutex_lock(&dev->mutex);
750 ovs_mutex_unlock(&dev->mutex);
/* Expands to the initializer for a struct netdev_class shared by every
 * vport class; the four parameters plug in the per-class get/set-config,
 * tunnel-config, and status callbacks.  The TUNNEL_CLASS macro below wraps
 * it into a struct vport_class initializer for a tunnel type NAME whose
 * datapath port is DPIF_PORT.
 *
 * NOTE(review): this chunk appears to be missing several initializer lines
 * that the full file contains (e.g. the run/wait, config, stats, and
 * status entries); the entries below must line up positionally with the
 * struct netdev_class member order in netdev-provider.h — confirm against
 * the full file before editing. */
#define VPORT_FUNCTIONS(GET_CONFIG, SET_CONFIG,             \
                        GET_TUNNEL_CONFIG, GET_STATUS)      \
    netdev_vport_alloc,                                     \
    netdev_vport_construct,                                 \
    netdev_vport_destruct,                                  \
    netdev_vport_dealloc,                                   \
    NULL,                       /* send_wait */             \
    netdev_vport_set_etheraddr,                             \
    netdev_vport_get_etheraddr,                             \
    NULL,                       /* get_mtu */               \
    NULL,                       /* set_mtu */               \
    NULL,                       /* get_ifindex */           \
    NULL,                       /* get_carrier */           \
    NULL,                       /* get_carrier_resets */    \
    NULL,                       /* get_miimon */            \
    NULL,                       /* set_stats */             \
    NULL,                       /* get_features */          \
    NULL,                       /* set_advertisements */    \
    NULL,                       /* set_policing */          \
    NULL,                       /* get_qos_types */         \
    NULL,                       /* get_qos_capabilities */  \
    NULL,                       /* get_qos */               \
    NULL,                       /* set_qos */               \
    NULL,                       /* get_queue */             \
    NULL,                       /* set_queue */             \
    NULL,                       /* delete_queue */          \
    NULL,                       /* get_queue_stats */       \
    NULL,                       /* queue_dump_start */      \
    NULL,                       /* queue_dump_next */       \
    NULL,                       /* queue_dump_done */       \
    NULL,                       /* dump_queue_stats */      \
    NULL,                       /* get_in4 */               \
    NULL,                       /* set_in4 */               \
    NULL,                       /* get_in6 */               \
    NULL,                       /* add_router */            \
    NULL,                       /* get_next_hop */          \
    NULL,                       /* arp_lookup */            \
    netdev_vport_update_flags,                              \
    NULL,                       /* rx_alloc */              \
    NULL,                       /* rx_construct */          \
    NULL,                       /* rx_destruct */           \
    NULL,                       /* rx_dealloc */            \
    NULL,                       /* rx_recv */               \
    NULL,                       /* rx_wait */               \
#define TUNNEL_CLASS(NAME, DPIF_PORT)                       \
    { NAME, VPORT_FUNCTIONS(get_tunnel_config,              \
                            get_netdev_tunnel_config,       \
                            tunnel_get_status) }}
826 netdev_vport_tunnel_register(void)
828 static const struct vport_class vport_classes[] = {
829 TUNNEL_CLASS("gre", "gre_system"),
830 TUNNEL_CLASS("ipsec_gre", "gre_system"),
831 TUNNEL_CLASS("gre64", "gre64_system"),
832 TUNNEL_CLASS("ipsec_gre64", "gre64_system"),
833 TUNNEL_CLASS("vxlan", "vxlan_system"),
834 TUNNEL_CLASS("lisp", "lisp_system")
836 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
838 if (ovsthread_once_start(&once)) {
841 for (i = 0; i < ARRAY_SIZE(vport_classes); i++) {
842 netdev_register_provider(&vport_classes[i].netdev_class);
844 ovsthread_once_done(&once);
849 netdev_vport_patch_register(void)
851 static const struct vport_class patch_class =
853 { "patch", VPORT_FUNCTIONS(get_patch_config,
857 netdev_register_provider(&patch_class.netdev_class);