/*
 * Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>

#include "netdev-vport.h"

#include <errno.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <sys/ioctl.h>

#include "byte-order.h"
#include "daemon.h"
#include "dirs.h"
#include "netdev-provider.h"
#include "ovs-thread.h"
#include "packets.h"
#include "route-table.h"
#include "smap.h"
#include "socket-util.h"
#include "util.h"
#include "vlog.h"
VLOG_DEFINE_THIS_MODULE(netdev_vport);

/* Default tunnel UDP destination ports (IANA-assigned). */
#define VXLAN_DST_PORT 4789
#define LISP_DST_PORT 4341

/* TTL used for encapsulated packets when the user does not specify one. */
#define DEFAULT_TTL 64
52 /* Protects all members below. */
53 struct ovs_mutex mutex;
55 unsigned int change_seq;
56 uint8_t etheraddr[ETH_ADDR_LEN];
57 struct netdev_stats stats;
60 struct netdev_tunnel_config tnl_cfg;
67 const char *dpif_port;
68 struct netdev_class netdev_class;
71 static int netdev_vport_construct(struct netdev *);
72 static int get_patch_config(const struct netdev *netdev, struct smap *args);
73 static int get_tunnel_config(const struct netdev *, struct smap *args);
74 static void netdev_vport_changed(struct netdev_vport *netdev)
75 OVS_REQUIRES(netdev->mutex);
78 is_vport_class(const struct netdev_class *class)
80 return class->construct == netdev_vport_construct;
83 static const struct vport_class *
84 vport_class_cast(const struct netdev_class *class)
86 ovs_assert(is_vport_class(class));
87 return CONTAINER_OF(class, struct vport_class, netdev_class);
90 static struct netdev_vport *
91 netdev_vport_cast(const struct netdev *netdev)
93 ovs_assert(is_vport_class(netdev_get_class(netdev)));
94 return CONTAINER_OF(netdev, struct netdev_vport, up);
97 static const struct netdev_tunnel_config *
98 get_netdev_tunnel_config(const struct netdev *netdev)
100 return &netdev_vport_cast(netdev)->tnl_cfg;
104 netdev_vport_is_patch(const struct netdev *netdev)
106 const struct netdev_class *class = netdev_get_class(netdev);
108 return class->get_config == get_patch_config;
/* Returns true if 'dev' is a layer-3 (L3) port, i.e. one that carries packets
 * without an Ethernet header.  Currently only LISP qualifies. */
bool
netdev_vport_is_layer3(const struct netdev *dev)
{
    const char *type = netdev_get_type(dev);

    return (!strcmp("lisp", type));
}
120 netdev_vport_needs_dst_port(const struct netdev *dev)
122 const struct netdev_class *class = netdev_get_class(dev);
123 const char *type = netdev_get_type(dev);
125 return (class->get_config == get_tunnel_config &&
126 (!strcmp("vxlan", type) || !strcmp("lisp", type)));
130 netdev_vport_class_get_dpif_port(const struct netdev_class *class)
132 return is_vport_class(class) ? vport_class_cast(class)->dpif_port : NULL;
136 netdev_vport_get_dpif_port(const struct netdev *netdev,
137 char namebuf[], size_t bufsize)
139 if (netdev_vport_needs_dst_port(netdev)) {
140 const struct netdev_vport *vport = netdev_vport_cast(netdev);
141 const char *type = netdev_get_type(netdev);
144 * Note: IFNAMSIZ is 16 bytes long. The maximum length of a VXLAN
145 * or LISP port name below is 15 or 14 bytes respectively. Still,
146 * assert here on the size of strlen(type) in case that changes
149 BUILD_ASSERT(NETDEV_VPORT_NAME_BUFSIZE >= IFNAMSIZ);
150 ovs_assert(strlen(type) + 10 < IFNAMSIZ);
151 snprintf(namebuf, bufsize, "%s_sys_%d", type,
152 ntohs(vport->tnl_cfg.dst_port));
155 const struct netdev_class *class = netdev_get_class(netdev);
156 const char *dpif_port = netdev_vport_class_get_dpif_port(class);
157 return dpif_port ? dpif_port : netdev_get_name(netdev);
162 netdev_vport_get_dpif_port_strdup(const struct netdev *netdev)
164 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
166 return xstrdup(netdev_vport_get_dpif_port(netdev, namebuf,
170 static struct netdev *
171 netdev_vport_alloc(void)
173 struct netdev_vport *netdev = xzalloc(sizeof *netdev);
178 netdev_vport_construct(struct netdev *netdev_)
180 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
182 ovs_mutex_init(&netdev->mutex);
183 netdev->change_seq = 1;
184 eth_addr_random(netdev->etheraddr);
186 route_table_register();
192 netdev_vport_destruct(struct netdev *netdev_)
194 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
196 route_table_unregister();
198 ovs_mutex_destroy(&netdev->mutex);
/* netdev_class 'dealloc' hook: frees the storage from netdev_vport_alloc(). */
static void
netdev_vport_dealloc(struct netdev *netdev_)
{
    struct netdev_vport *netdev = netdev_vport_cast(netdev_);

    free(netdev);
}
209 netdev_vport_set_etheraddr(struct netdev *netdev_,
210 const uint8_t mac[ETH_ADDR_LEN])
212 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
214 ovs_mutex_lock(&netdev->mutex);
215 memcpy(netdev->etheraddr, mac, ETH_ADDR_LEN);
216 netdev_vport_changed(netdev);
217 ovs_mutex_unlock(&netdev->mutex);
223 netdev_vport_get_etheraddr(const struct netdev *netdev_,
224 uint8_t mac[ETH_ADDR_LEN])
226 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
228 ovs_mutex_lock(&netdev->mutex);
229 memcpy(mac, netdev->etheraddr, ETH_ADDR_LEN);
230 ovs_mutex_unlock(&netdev->mutex);
236 tunnel_get_status(const struct netdev *netdev_, struct smap *smap)
238 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
239 char iface[IFNAMSIZ];
242 ovs_mutex_lock(&netdev->mutex);
243 route = netdev->tnl_cfg.ip_dst;
244 ovs_mutex_unlock(&netdev->mutex);
246 if (route_table_get_name(route, iface)) {
247 struct netdev *egress_netdev;
249 smap_add(smap, "tunnel_egress_iface", iface);
251 if (!netdev_open(iface, "system", &egress_netdev)) {
252 smap_add(smap, "tunnel_egress_iface_carrier",
253 netdev_get_carrier(egress_netdev) ? "up" : "down");
254 netdev_close(egress_netdev);
262 netdev_vport_update_flags(struct netdev *netdev OVS_UNUSED,
263 enum netdev_flags off,
264 enum netdev_flags on OVS_UNUSED,
265 enum netdev_flags *old_flagsp)
267 if (off & (NETDEV_UP | NETDEV_PROMISC)) {
271 *old_flagsp = NETDEV_UP | NETDEV_PROMISC;
276 netdev_vport_change_seq(const struct netdev *netdev)
278 return netdev_vport_cast(netdev)->change_seq;
/* netdev_class 'run' hook: processes pending route-table updates. */
static void
netdev_vport_run(void)
{
    route_table_run();
}
/* netdev_class 'wait' hook: wakes the caller when route-table work is
 * pending. */
static void
netdev_vport_wait(void)
{
    route_table_wait();
}
/* Helper functions. */
296 netdev_vport_changed(struct netdev_vport *ndv)
299 if (!ndv->change_seq) {
/* Code specific to tunnel types. */
307 parse_key(const struct smap *args, const char *name,
308 bool *present, bool *flow)
315 s = smap_get(args, name);
317 s = smap_get(args, "key");
325 if (!strcmp(s, "flow")) {
329 return htonll(strtoull(s, NULL, 0));
334 set_tunnel_config(struct netdev *dev_, const struct smap *args)
336 struct netdev_vport *dev = netdev_vport_cast(dev_);
337 const char *name = netdev_get_name(dev_);
338 const char *type = netdev_get_type(dev_);
339 bool ipsec_mech_set, needs_dst_port, has_csum;
340 struct netdev_tunnel_config tnl_cfg;
341 struct smap_node *node;
343 has_csum = strstr(type, "gre");
344 ipsec_mech_set = false;
345 memset(&tnl_cfg, 0, sizeof tnl_cfg);
347 needs_dst_port = netdev_vport_needs_dst_port(dev_);
348 tnl_cfg.ipsec = strstr(type, "ipsec");
349 tnl_cfg.dont_fragment = true;
351 SMAP_FOR_EACH (node, args) {
352 if (!strcmp(node->key, "remote_ip")) {
353 struct in_addr in_addr;
354 if (!strcmp(node->value, "flow")) {
355 tnl_cfg.ip_dst_flow = true;
356 tnl_cfg.ip_dst = htonl(0);
357 } else if (lookup_ip(node->value, &in_addr)) {
358 VLOG_WARN("%s: bad %s 'remote_ip'", name, type);
359 } else if (ip_is_multicast(in_addr.s_addr)) {
360 VLOG_WARN("%s: multicast remote_ip="IP_FMT" not allowed",
361 name, IP_ARGS(in_addr.s_addr));
364 tnl_cfg.ip_dst = in_addr.s_addr;
366 } else if (!strcmp(node->key, "local_ip")) {
367 struct in_addr in_addr;
368 if (!strcmp(node->value, "flow")) {
369 tnl_cfg.ip_src_flow = true;
370 tnl_cfg.ip_src = htonl(0);
371 } else if (lookup_ip(node->value, &in_addr)) {
372 VLOG_WARN("%s: bad %s 'local_ip'", name, type);
374 tnl_cfg.ip_src = in_addr.s_addr;
376 } else if (!strcmp(node->key, "tos")) {
377 if (!strcmp(node->value, "inherit")) {
378 tnl_cfg.tos_inherit = true;
382 tos = strtol(node->value, &endptr, 0);
383 if (*endptr == '\0' && tos == (tos & IP_DSCP_MASK)) {
386 VLOG_WARN("%s: invalid TOS %s", name, node->value);
389 } else if (!strcmp(node->key, "ttl")) {
390 if (!strcmp(node->value, "inherit")) {
391 tnl_cfg.ttl_inherit = true;
393 tnl_cfg.ttl = atoi(node->value);
395 } else if (!strcmp(node->key, "dst_port") && needs_dst_port) {
396 tnl_cfg.dst_port = htons(atoi(node->value));
397 } else if (!strcmp(node->key, "csum") && has_csum) {
398 if (!strcmp(node->value, "true")) {
401 } else if (!strcmp(node->key, "df_default")) {
402 if (!strcmp(node->value, "false")) {
403 tnl_cfg.dont_fragment = false;
405 } else if (!strcmp(node->key, "peer_cert") && tnl_cfg.ipsec) {
406 if (smap_get(args, "certificate")) {
407 ipsec_mech_set = true;
409 const char *use_ssl_cert;
411 /* If the "use_ssl_cert" is true, then "certificate" and
412 * "private_key" will be pulled from the SSL table. The
413 * use of this option is strongly discouraged, since it
414 * will like be removed when multiple SSL configurations
415 * are supported by OVS.
417 use_ssl_cert = smap_get(args, "use_ssl_cert");
418 if (!use_ssl_cert || strcmp(use_ssl_cert, "true")) {
419 VLOG_ERR("%s: 'peer_cert' requires 'certificate' argument",
423 ipsec_mech_set = true;
425 } else if (!strcmp(node->key, "psk") && tnl_cfg.ipsec) {
426 ipsec_mech_set = true;
427 } else if (tnl_cfg.ipsec
428 && (!strcmp(node->key, "certificate")
429 || !strcmp(node->key, "private_key")
430 || !strcmp(node->key, "use_ssl_cert"))) {
431 /* Ignore options not used by the netdev. */
432 } else if (!strcmp(node->key, "key") ||
433 !strcmp(node->key, "in_key") ||
434 !strcmp(node->key, "out_key")) {
435 /* Handled separately below. */
437 VLOG_WARN("%s: unknown %s argument '%s'", name, type, node->key);
441 /* Add a default destination port for VXLAN if none specified. */
442 if (!strcmp(type, "vxlan") && !tnl_cfg.dst_port) {
443 tnl_cfg.dst_port = htons(VXLAN_DST_PORT);
446 /* Add a default destination port for LISP if none specified. */
447 if (!strcmp(type, "lisp") && !tnl_cfg.dst_port) {
448 tnl_cfg.dst_port = htons(LISP_DST_PORT);
452 static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
453 static pid_t pid = 0;
455 ovs_mutex_lock(&mutex);
457 char *file_name = xasprintf("%s/%s", ovs_rundir(),
458 "ovs-monitor-ipsec.pid");
459 pid = read_pidfile(file_name);
462 ovs_mutex_unlock(&mutex);
465 VLOG_ERR("%s: IPsec requires the ovs-monitor-ipsec daemon",
470 if (smap_get(args, "peer_cert") && smap_get(args, "psk")) {
471 VLOG_ERR("%s: cannot define both 'peer_cert' and 'psk'", name);
475 if (!ipsec_mech_set) {
476 VLOG_ERR("%s: IPsec requires an 'peer_cert' or psk' argument",
482 if (!tnl_cfg.ip_dst && !tnl_cfg.ip_dst_flow) {
483 VLOG_ERR("%s: %s type requires valid 'remote_ip' argument",
487 if (tnl_cfg.ip_src_flow && !tnl_cfg.ip_dst_flow) {
488 VLOG_ERR("%s: %s type requires 'remote_ip=flow' with 'local_ip=flow'",
493 tnl_cfg.ttl = DEFAULT_TTL;
496 tnl_cfg.in_key = parse_key(args, "in_key",
497 &tnl_cfg.in_key_present,
498 &tnl_cfg.in_key_flow);
500 tnl_cfg.out_key = parse_key(args, "out_key",
501 &tnl_cfg.out_key_present,
502 &tnl_cfg.out_key_flow);
504 ovs_mutex_lock(&dev->mutex);
505 dev->tnl_cfg = tnl_cfg;
506 netdev_vport_changed(dev);
507 ovs_mutex_unlock(&dev->mutex);
513 get_tunnel_config(const struct netdev *dev, struct smap *args)
515 struct netdev_vport *netdev = netdev_vport_cast(dev);
516 struct netdev_tunnel_config tnl_cfg;
518 ovs_mutex_lock(&netdev->mutex);
519 tnl_cfg = netdev->tnl_cfg;
520 ovs_mutex_unlock(&netdev->mutex);
522 if (tnl_cfg.ip_dst) {
523 smap_add_format(args, "remote_ip", IP_FMT, IP_ARGS(tnl_cfg.ip_dst));
524 } else if (tnl_cfg.ip_dst_flow) {
525 smap_add(args, "remote_ip", "flow");
528 if (tnl_cfg.ip_src) {
529 smap_add_format(args, "local_ip", IP_FMT, IP_ARGS(tnl_cfg.ip_src));
530 } else if (tnl_cfg.ip_src_flow) {
531 smap_add(args, "local_ip", "flow");
534 if (tnl_cfg.in_key_flow && tnl_cfg.out_key_flow) {
535 smap_add(args, "key", "flow");
536 } else if (tnl_cfg.in_key_present && tnl_cfg.out_key_present
537 && tnl_cfg.in_key == tnl_cfg.out_key) {
538 smap_add_format(args, "key", "%"PRIu64, ntohll(tnl_cfg.in_key));
540 if (tnl_cfg.in_key_flow) {
541 smap_add(args, "in_key", "flow");
542 } else if (tnl_cfg.in_key_present) {
543 smap_add_format(args, "in_key", "%"PRIu64,
544 ntohll(tnl_cfg.in_key));
547 if (tnl_cfg.out_key_flow) {
548 smap_add(args, "out_key", "flow");
549 } else if (tnl_cfg.out_key_present) {
550 smap_add_format(args, "out_key", "%"PRIu64,
551 ntohll(tnl_cfg.out_key));
555 if (tnl_cfg.ttl_inherit) {
556 smap_add(args, "ttl", "inherit");
557 } else if (tnl_cfg.ttl != DEFAULT_TTL) {
558 smap_add_format(args, "ttl", "%"PRIu8, tnl_cfg.ttl);
561 if (tnl_cfg.tos_inherit) {
562 smap_add(args, "tos", "inherit");
563 } else if (tnl_cfg.tos) {
564 smap_add_format(args, "tos", "0x%x", tnl_cfg.tos);
567 if (tnl_cfg.dst_port) {
568 uint16_t dst_port = ntohs(tnl_cfg.dst_port);
569 const char *type = netdev_get_type(dev);
571 if ((!strcmp("vxlan", type) && dst_port != VXLAN_DST_PORT) ||
572 (!strcmp("lisp", type) && dst_port != LISP_DST_PORT)) {
573 smap_add_format(args, "dst_port", "%d", dst_port);
578 smap_add(args, "csum", "true");
581 if (!tnl_cfg.dont_fragment) {
582 smap_add(args, "df_default", "false");
/* Code specific to patch ports. */

/* If 'netdev' is a patch port, returns the name of its peer as a malloc()'d
 * string that the caller must free.
 *
 * If 'netdev' is not a patch port, returns NULL. */
595 netdev_vport_patch_peer(const struct netdev *netdev_)
599 if (netdev_vport_is_patch(netdev_)) {
600 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
602 ovs_mutex_lock(&netdev->mutex);
604 peer = xstrdup(netdev->peer);
606 ovs_mutex_unlock(&netdev->mutex);
613 netdev_vport_inc_rx(const struct netdev *netdev,
614 const struct dpif_flow_stats *stats)
616 if (is_vport_class(netdev_get_class(netdev))) {
617 struct netdev_vport *dev = netdev_vport_cast(netdev);
619 ovs_mutex_lock(&dev->mutex);
620 dev->stats.rx_packets += stats->n_packets;
621 dev->stats.rx_bytes += stats->n_bytes;
622 ovs_mutex_unlock(&dev->mutex);
627 netdev_vport_inc_tx(const struct netdev *netdev,
628 const struct dpif_flow_stats *stats)
630 if (is_vport_class(netdev_get_class(netdev))) {
631 struct netdev_vport *dev = netdev_vport_cast(netdev);
633 ovs_mutex_lock(&dev->mutex);
634 dev->stats.tx_packets += stats->n_packets;
635 dev->stats.tx_bytes += stats->n_bytes;
636 ovs_mutex_unlock(&dev->mutex);
641 get_patch_config(const struct netdev *dev_, struct smap *args)
643 struct netdev_vport *dev = netdev_vport_cast(dev_);
645 ovs_mutex_lock(&dev->mutex);
647 smap_add(args, "peer", dev->peer);
649 ovs_mutex_unlock(&dev->mutex);
655 set_patch_config(struct netdev *dev_, const struct smap *args)
657 struct netdev_vport *dev = netdev_vport_cast(dev_);
658 const char *name = netdev_get_name(dev_);
661 peer = smap_get(args, "peer");
663 VLOG_ERR("%s: patch type requires valid 'peer' argument", name);
667 if (smap_count(args) > 1) {
668 VLOG_ERR("%s: patch type takes only a 'peer' argument", name);
672 if (!strcmp(name, peer)) {
673 VLOG_ERR("%s: patch peer must not be self", name);
677 ovs_mutex_lock(&dev->mutex);
679 dev->peer = xstrdup(peer);
680 netdev_vport_changed(dev);
681 ovs_mutex_unlock(&dev->mutex);
687 get_stats(const struct netdev *netdev, struct netdev_stats *stats)
689 struct netdev_vport *dev = netdev_vport_cast(netdev);
691 ovs_mutex_lock(&dev->mutex);
693 ovs_mutex_unlock(&dev->mutex);
/* Expands to the ordered member list of a struct netdev_class, filling the
 * hooks shared by every vport class and leaving the four per-type hooks as
 * macro parameters.  The order must match struct netdev_class exactly. */
#define VPORT_FUNCTIONS(GET_CONFIG, SET_CONFIG,             \
                        GET_TUNNEL_CONFIG, GET_STATUS)      \
    NULL,                       /* init */                  \
    netdev_vport_run,                                       \
    netdev_vport_wait,                                      \
                                                            \
    netdev_vport_alloc,                                     \
    netdev_vport_construct,                                 \
    netdev_vport_destruct,                                  \
    netdev_vport_dealloc,                                   \
    GET_CONFIG,                                             \
    SET_CONFIG,                                             \
    GET_TUNNEL_CONFIG,                                      \
                                                            \
    NULL,                       /* send */                  \
    NULL,                       /* send_wait */             \
                                                            \
    netdev_vport_set_etheraddr,                             \
    netdev_vport_get_etheraddr,                             \
    NULL,                       /* get_mtu */               \
    NULL,                       /* set_mtu */               \
    NULL,                       /* get_ifindex */           \
    NULL,                       /* get_carrier */           \
    NULL,                       /* get_carrier_resets */    \
    NULL,                       /* get_miimon */            \
    get_stats,                                              \
    NULL,                       /* set_stats */             \
                                                            \
    NULL,                       /* get_features */          \
    NULL,                       /* set_advertisements */    \
                                                            \
    NULL,                       /* set_policing */          \
    NULL,                       /* get_qos_types */         \
    NULL,                       /* get_qos_capabilities */  \
    NULL,                       /* get_qos */               \
    NULL,                       /* set_qos */               \
    NULL,                       /* get_queue */             \
    NULL,                       /* set_queue */             \
    NULL,                       /* delete_queue */          \
    NULL,                       /* get_queue_stats */       \
    NULL,                       /* queue_dump_start */      \
    NULL,                       /* queue_dump_next */       \
    NULL,                       /* queue_dump_done */       \
    NULL,                       /* dump_queue_stats */      \
                                                            \
    NULL,                       /* get_in4 */               \
    NULL,                       /* set_in4 */               \
    NULL,                       /* get_in6 */               \
    NULL,                       /* add_router */            \
    NULL,                       /* get_next_hop */          \
    GET_STATUS,                                             \
    NULL,                       /* arp_lookup */            \
                                                            \
    netdev_vport_update_flags,                              \
                                                            \
    netdev_vport_change_seq,                                \
                                                            \
    NULL,                       /* rx_alloc */              \
    NULL,                       /* rx_construct */          \
    NULL,                       /* rx_destruct */           \
    NULL,                       /* rx_dealloc */            \
    NULL,                       /* rx_recv */               \
    NULL,                       /* rx_wait */               \
    NULL,                       /* rx_drain */
/* Expands to a vport_class initializer for a tunnel type named NAME whose
 * datapath port name is DPIF_PORT. */
#define TUNNEL_CLASS(NAME, DPIF_PORT)                       \
    { DPIF_PORT,                                            \
        { NAME, VPORT_FUNCTIONS(get_tunnel_config,          \
                                set_tunnel_config,          \
                                get_netdev_tunnel_config,   \
                                tunnel_get_status) }}
771 netdev_vport_tunnel_register(void)
773 static const struct vport_class vport_classes[] = {
774 TUNNEL_CLASS("gre", "gre_system"),
775 TUNNEL_CLASS("ipsec_gre", "gre_system"),
776 TUNNEL_CLASS("gre64", "gre64_system"),
777 TUNNEL_CLASS("ipsec_gre64", "gre64_system"),
778 TUNNEL_CLASS("vxlan", "vxlan_system"),
779 TUNNEL_CLASS("lisp", "lisp_system")
781 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
783 if (ovsthread_once_start(&once)) {
786 for (i = 0; i < ARRAY_SIZE(vport_classes); i++) {
787 netdev_register_provider(&vport_classes[i].netdev_class);
789 ovsthread_once_done(&once);
794 netdev_vport_patch_register(void)
796 static const struct vport_class patch_class =
798 { "patch", VPORT_FUNCTIONS(get_patch_config,
802 netdev_register_provider(&patch_class.netdev_class);