OVS_GREP_IFELSE([$KSRC/include/linux/skbuff.h], [consume_skb])
OVS_GREP_IFELSE([$KSRC/include/linux/skbuff.h], [skb_frag_page])
OVS_GREP_IFELSE([$KSRC/include/linux/skbuff.h], [skb_reset_mac_len])
+ OVS_GREP_IFELSE([$KSRC/include/linux/skbuff.h], [skb_unclone])
OVS_GREP_IFELSE([$KSRC/include/linux/string.h], [kmemdup], [],
[OVS_GREP_IFELSE([$KSRC/include/linux/slab.h], [kmemdup])])
OVS_CHECK_POSIX_AIO
OVS_ENABLE_OPTION([-Wall])
+OVS_ENABLE_OPTION([-Wextra])
OVS_ENABLE_OPTION([-Wno-sign-compare])
OVS_ENABLE_OPTION([-Wpointer-arith])
OVS_ENABLE_OPTION([-Wdeclaration-after-statement])
OVS_ENABLE_OPTION([-Wold-style-definition])
OVS_ENABLE_OPTION([-Wmissing-prototypes])
OVS_ENABLE_OPTION([-Wmissing-field-initializers])
-OVS_ENABLE_OPTION([-Wno-override-init])
OVS_CONDITIONAL_CC_OPTION([-Wno-unused], [HAVE_WNO_UNUSED])
OVS_CONDITIONAL_CC_OPTION([-Wno-unused-parameter], [HAVE_WNO_UNUSED_PARAMETER])
OVS_ENABLE_WERROR
#define rt_dst(rt) (rt->u.dst)
#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
+/* inet_sock's source-port member was renamed sport -> inet_sport in
+ * 2.6.33; inet_sport() hides the difference from callers. */
+#define inet_sport(sk) (inet_sk(sk)->sport)
+#else
+#define inet_sport(sk) (inet_sk(sk)->inet_sport)
+#endif
+
#endif /* compat.h */
u32 seq, u32 flags, u8 cmd)
{
const int skb_orig_len = skb->len;
- struct sw_flow_mask *mask;
struct nlattr *start;
struct ovs_flow_stats stats;
struct ovs_header *ovs_header;
if (!nla)
goto nla_put_failure;
- mask = rcu_dereference_check(flow->mask, lockdep_ovsl_is_held());
- err = ovs_flow_to_nlattrs(&flow->key, &mask->key, skb);
+ err = ovs_flow_to_nlattrs(&flow->key, &flow->mask->key, skb);
if (err)
goto error;
}
ovs_sw_flow_mask_add_ref(mask_p);
- rcu_assign_pointer(flow->mask, mask_p);
+ flow->mask = mask_p;
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
.size = sizeof(struct ovs_net),
};
+DEFINE_COMPAT_PNET_REG_FUNC(device);
+
static int __init dp_init(void)
{
int err;
if (!flow)
return;
- ovs_sw_flow_mask_del_ref((struct sw_flow_mask __force *)flow->mask,
- deferred);
+ ovs_sw_flow_mask_del_ref(flow->mask, deferred);
if (deferred)
call_rcu(&flow->rcu, rcu_free_flow_callback);
hash = ovs_flow_hash(&masked_key, key_start, key_len);
head = find_bucket(table, hash);
hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
- if (__flow_cmp_key(flow, &masked_key, key_start, key_len))
+ if (flow->mask == mask &&
+ __flow_cmp_key(flow, &masked_key, key_start, key_len))
return flow;
}
return NULL;
void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow)
{
- flow->hash = ovs_flow_hash(&flow->key,
- ovsl_dereference(flow->mask)->range.start,
- ovsl_dereference(flow->mask)->range.end);
+ flow->hash = ovs_flow_hash(&flow->key, flow->mask->range.start,
+ flow->mask->range.end);
__tbl_insert(table, flow);
}
struct sw_flow_key key;
struct sw_flow_key unmasked_key;
- struct sw_flow_mask __rcu *mask;
+ struct sw_flow_mask *mask;
struct sw_flow_actions __rcu *sf_acts;
spinlock_t lock; /* Lock for values below. */
linux/compat/dev-openvswitch.c \
linux/compat/exthdrs_core.c \
linux/compat/flex_array.c \
+ linux/compat/flow_dissector.c \
linux/compat/gre.c \
linux/compat/gso.c \
linux/compat/genetlink-openvswitch.c \
linux/compat/reciprocal_div.c \
linux/compat/skbuff-openvswitch.c \
linux/compat/time.c \
+ linux/compat/vxlan.c \
linux/compat/workqueue.c
openvswitch_headers += \
linux/compat/gso.h \
linux/compat/include/linux/workqueue.h \
linux/compat/include/net/checksum.h \
linux/compat/include/net/dst.h \
+ linux/compat/include/net/flow_keys.h \
linux/compat/include/net/genetlink.h \
linux/compat/include/net/gre.h \
linux/compat/include/net/inet_frag.h \
linux/compat/include/net/protocol.h \
linux/compat/include/net/route.h \
linux/compat/include/net/sock.h \
- linux/compat/include/net/netns/generic.h
+ linux/compat/include/net/netns/generic.h \
+ linux/compat/include/net/vxlan.h
--- /dev/null
+/*
+ * Copyright (c) 2007-2013 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ * This code is derived from kernel flow_dissector.c
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/igmp.h>
+#include <linux/icmp.h>
+#include <linux/sctp.h>
+#include <linux/dccp.h>
+#include <linux/if_tunnel.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include <net/flow_keys.h>
+
+
+/* copy saddr & daddr, possibly using 64bit load/store
+ * Equivalent to : flow->src = iph->saddr;
+ * flow->dst = iph->daddr;
+ */
+static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
+{
+ /* The single memcpy is only valid if src and dst are adjacent in
+ * struct flow_keys; the BUILD_BUG_ON enforces that layout. */
+ BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
+ offsetof(typeof(*flow), src) + sizeof(flow->src));
+ memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
+}
+
+/*
+ * Dissect @skb into @flow (addresses, L4 ports, ip_proto, transport
+ * header offset).  Understands IPv4/IPv6, VLAN and PPPoE-session
+ * encapsulation, and looks through version-0 GRE without routing
+ * (including transparent Ethernet bridging, ETH_P_TEB).
+ * Returns false if a required header cannot be read.
+ */
+static bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
+{
+ int poff, nhoff = skb_network_offset(skb);
+ u8 ip_proto;
+ __be16 proto = skb->protocol;
+
+ memset(flow, 0, sizeof(*flow));
+
+again:
+ switch (proto) {
+ case __constant_htons(ETH_P_IP): {
+ const struct iphdr *iph;
+ struct iphdr _iph;
+ip:
+ iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+ if (!iph)
+ return false;
+
+ /* Fragments may lack the L4 header: leave ip_proto 0 so the
+ * port lookup at the end is skipped. */
+ if (ip_is_fragment(iph))
+ ip_proto = 0;
+ else
+ ip_proto = iph->protocol;
+ iph_to_flow_copy_addrs(flow, iph);
+ nhoff += iph->ihl * 4;
+ break;
+ }
+ case __constant_htons(ETH_P_IPV6): {
+ const struct ipv6hdr *iph;
+ struct ipv6hdr _iph;
+ipv6:
+ iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+ if (!iph)
+ return false;
+
+ ip_proto = iph->nexthdr;
+ /* Fold each 128-bit address down to 32 bits for hashing. */
+ flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
+ flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
+ nhoff += sizeof(struct ipv6hdr);
+ break;
+ }
+ case __constant_htons(ETH_P_8021Q): {
+ const struct vlan_hdr *vlan;
+ struct vlan_hdr _vlan;
+
+ vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
+ if (!vlan)
+ return false;
+
+ /* Step past the tag and re-dispatch on the inner type. */
+ proto = vlan->h_vlan_encapsulated_proto;
+ nhoff += sizeof(*vlan);
+ goto again;
+ }
+ case __constant_htons(ETH_P_PPP_SES): {
+ struct {
+ struct pppoe_hdr hdr;
+ __be16 proto;
+ } *hdr, _hdr;
+ hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
+ if (!hdr)
+ return false;
+ proto = hdr->proto;
+ nhoff += PPPOE_SES_HLEN;
+ switch (proto) {
+ case __constant_htons(PPP_IP):
+ goto ip;
+ case __constant_htons(PPP_IPV6):
+ goto ipv6;
+ default:
+ return false;
+ }
+ }
+ default:
+ return false;
+ }
+
+ /* Second stage: look through tunnels carried in the L4 protocol. */
+ switch (ip_proto) {
+ case IPPROTO_GRE: {
+ struct gre_hdr {
+ __be16 flags;
+ __be16 proto;
+ } *hdr, _hdr;
+
+ hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
+ if (!hdr)
+ return false;
+ /*
+ * Only look inside GRE if version zero and no
+ * routing
+ */
+ if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
+ proto = hdr->proto;
+ /* Skip the base header plus each optional field. */
+ nhoff += 4;
+ if (hdr->flags & GRE_CSUM)
+ nhoff += 4;
+ if (hdr->flags & GRE_KEY)
+ nhoff += 4;
+ if (hdr->flags & GRE_SEQ)
+ nhoff += 4;
+ if (proto == htons(ETH_P_TEB)) {
+ const struct ethhdr *eth;
+ struct ethhdr _eth;
+
+ eth = skb_header_pointer(skb, nhoff,
+ sizeof(_eth), &_eth);
+ if (!eth)
+ return false;
+ proto = eth->h_proto;
+ nhoff += sizeof(*eth);
+ }
+ goto again;
+ }
+ break;
+ }
+ case IPPROTO_IPIP:
+ goto again;
+ default:
+ break;
+ }
+
+ flow->ip_proto = ip_proto;
+ /* proto_ports_offset() < 0 means no port/SPI field for ip_proto. */
+ poff = proto_ports_offset(ip_proto);
+ if (poff >= 0) {
+ __be32 *ports, _ports;
+
+ nhoff += poff;
+ ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
+ if (ports)
+ flow->ports = *ports;
+ }
+
+ flow->thoff = (u16) nhoff;
+
+ return true;
+}
+
+/* Random key mixed into the flow hash; seeded lazily on first use. */
+static u32 hashrnd __read_mostly;
+
+/* NOTE(review): no synchronization here — concurrent first callers could
+ * briefly observe different seeds; confirm this is acceptable. */
+static void init_hashrnd(void)
+{
+ if (likely(hashrnd))
+ return;
+ get_random_bytes(&hashrnd, sizeof(hashrnd));
+}
+
+/*
+ * Compute a direction-independent flow hash for @skb.  src/dst and the
+ * two ports are swapped into a canonical order first so both directions
+ * of a flow hash identically.  Returns 0 only when the packet cannot be
+ * dissected (a computed hash of 0 is remapped to 1).  On kernels newer
+ * than 2.6.34 the result is also cached in skb->rxhash.
+ */
+u32 __skb_get_rxhash(struct sk_buff *skb)
+{
+ struct flow_keys keys;
+ u32 hash;
+
+ if (!skb_flow_dissect(skb, &keys))
+ return 0;
+
+ /* get a consistent hash (same value on both flow directions) */
+ if (((__force u32)keys.dst < (__force u32)keys.src) ||
+ (((__force u32)keys.dst == (__force u32)keys.src) &&
+ ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
+ swap(keys.dst, keys.src);
+ swap(keys.port16[0], keys.port16[1]);
+ }
+
+ init_hashrnd();
+
+ hash = jhash_3words((__force u32)keys.dst,
+ (__force u32)keys.src,
+ (__force u32)keys.ports, hashrnd);
+ /* 0 is reserved to mean "no hash computed". */
+ if (!hash)
+ hash = 1;
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34)
+ skb->rxhash = hash;
+#endif
+ return hash;
+}
+#endif
#include_next <linux/in.h>
+#include <linux/module.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
+/* Backport of proto_ports_offset(): byte offset from the start of the
+ * transport header to the 32 bits identifying the flow (the two ports,
+ * or the SPI for ESP/AH), or -EINVAL when @proto has no such field. */
+static inline int proto_ports_offset(int proto)
+{
+ switch (proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_DCCP:
+ case IPPROTO_ESP: /* SPI */
+ case IPPROTO_SCTP:
+ case IPPROTO_UDPLITE:
+ return 0;
+ case IPPROTO_AH: /* SPI */
+ return 4;
+ default:
+ return -EINVAL;
+ }
+}
+#endif
+
#ifndef HAVE_IPV4_IS_MULTICAST
static inline bool ipv4_is_loopback(__be32 addr)
skb->mac_len = skb->network_header - skb->mac_header;
}
#endif
+
+#ifndef HAVE_SKB_UNCLONE
+/* Backport of skb_unclone(): make sure @skb's header data is not shared
+ * with a clone so it can be written; may reallocate the head using @pri.
+ * Returns 0 on success or the pskb_expand_head() error. */
+static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
+{
+	might_sleep_if(pri & __GFP_WAIT);
+
+	if (skb_cloned(skb))
+		return pskb_expand_head(skb, 0, 0, pri);
+
+	return 0;
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
+extern u32 __skb_get_rxhash(struct sk_buff *skb);
+/* Backport of skb_get_rxhash(): return the cached flow hash when the
+ * kernel has skb->rxhash (> 2.6.34) and it is already set, otherwise
+ * compute it (which also caches it where supported).
+ *
+ * Fix: the previous version fell off the end of this non-void function
+ * when skb->rxhash was already nonzero on 2.6.35/36 kernels (the
+ * "if (!skb->rxhash)" guard skipped the only return statement),
+ * returning an indeterminate value. */
+static inline __u32 skb_get_rxhash(struct sk_buff *skb)
+{
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34)
+ if (skb->rxhash)
+ return skb->rxhash;
+#endif
+ return __skb_get_rxhash(skb);
+}
+#endif
+
#endif
--- /dev/null
+#ifndef _NET_FLOW_KEYS_WRAPPER_H
+#define _NET_FLOW_KEYS_WRAPPER_H
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
+#include_next <net/flow_keys.h>
+#else
+struct flow_keys {
+ /* (src,dst) must be grouped, in the same way than in IP header */
+ __be32 src;
+ __be32 dst;
+ union {
+ __be32 ports;
+ __be16 port16[2];
+ };
+ u16 thoff;
+ u8 ip_proto;
+};
+#endif
+
+#endif
#endif /* linux kernel < 2.6.25 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)
+/* Backport of ip_is_fragment(): true if the more-fragments flag is set
+ * or the fragment offset is nonzero (i.e. any fragment of a datagram). */
+static inline bool ip_is_fragment(const struct iphdr *iph)
+{
+	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
+}
+#endif
+
#endif
extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
int target, unsigned short *fragoff, int *fragflg);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
+/* Backport of ipv6_addr_hash(): XOR-fold a 128-bit IPv6 address down to
+ * 32 bits for use as a hash input.  The 64-bit path does two wide loads
+ * when unaligned access is cheap; both paths yield the same fold. */
+static inline u32 ipv6_addr_hash(const struct in6_addr *a)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+	const unsigned long *ul = (const unsigned long *)a;
+	unsigned long x = ul[0] ^ ul[1];
+
+	return (u32)(x ^ (x >> 32));
+#else
+	return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^
+			     a->s6_addr32[2] ^ a->s6_addr32[3]);
+#endif
+}
+#endif
+
#endif
#endif /* 2.6.29 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
-#define pernet_operations rpl_pernet_operations
-struct pernet_operations {
+struct rpl_pernet_operations {
int (*init)(struct net *net);
void (*exit)(struct net *net);
int *id;
size_t size;
+ struct pernet_operations ops;
};
+#define pernet_operations rpl_pernet_operations
+
+#define register_pernet_device rpl_register_pernet_gen_device
+#define unregister_pernet_device rpl_unregister_pernet_gen_device
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
extern int rpl_register_pernet_gen_device(struct rpl_pernet_operations *ops);
extern void rpl_unregister_pernet_gen_device(struct rpl_pernet_operations *ops);
-#define register_pernet_device rpl_register_pernet_gen_device
-#define unregister_pernet_device rpl_unregister_pernet_gen_device
+#else /* for 2.6.32* */
+
+int __net_init compat_init_net(struct net *net, struct rpl_pernet_operations *pnet);
+void __net_exit compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet);
+
+#define DEFINE_COMPAT_PNET_REG_FUNC(TYPE) \
+ \
+static struct rpl_pernet_operations *pnet_gen_##TYPE; \
+static int __net_init compat_init_net_gen_##TYPE(struct net *net) \
+{ \
+ return compat_init_net(net, pnet_gen_##TYPE); \
+} \
+ \
+static void __net_exit compat_exit_net_gen_##TYPE(struct net *net) \
+{ \
+ compat_exit_net(net, pnet_gen_##TYPE); \
+} \
+ \
+static int __net_init rpl_register_pernet_gen_##TYPE(struct rpl_pernet_operations *rpl_pnet) \
+{ \
+ pnet_gen_##TYPE = rpl_pnet; \
+ rpl_pnet->ops.init = compat_init_net_gen_##TYPE; \
+ rpl_pnet->ops.exit = compat_exit_net_gen_##TYPE; \
+ return register_pernet_gen_##TYPE(pnet_gen_##TYPE->id, &rpl_pnet->ops); \
+} \
+ \
+static void __net_exit rpl_unregister_pernet_gen_##TYPE(struct rpl_pernet_operations *rpl_pnet) \
+{ \
+ unregister_pernet_gen_##TYPE(*pnet_gen_##TYPE->id, &rpl_pnet->ops); \
+}
+#endif
+#endif /* 2.6.33 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) || \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)
+#define DEFINE_COMPAT_PNET_REG_FUNC(TYPE)
#endif /* 2.6.33 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
--- /dev/null
+#ifndef __NET_VXLAN_WRAPPER_H
+#define __NET_VXLAN_WRAPPER_H 1
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+
+/* per UDP socket information */
+struct vxlan_sock {
+ struct hlist_node hlist; /* entry in per-net sock_list hash */
+ struct rcu_head rcu;
+ struct socket *sock; /* kernel UDP socket bound to the port */
+ struct list_head handler_list; /* vxlan_handlers, ordered by priority */
+};
+
+struct vxlan_handler;
+/* Receive hook; returns PACKET_RCVD when it consumed the skb. */
+typedef int (vxlan_rcv_t)(struct vxlan_handler *vh, struct sk_buff *skb, __be32 key);
+
+struct vxlan_handler {
+ vxlan_rcv_t *rcv; /* per-handler receive callback */
+ struct list_head node; /* entry in vxlan_sock.handler_list */
+ void *data; /* opaque cookie for the owner */
+ struct vxlan_sock *vs; /* socket this handler is attached to */
+ atomic_t refcnt; /* dropped via vxlan_handler_put() */
+ struct rcu_head rcu;
+ struct work_struct del_work; /* deferred teardown on last put */
+ int priority; /* lower value = tried earlier on receive */
+};
+
+/* Drop a reference; teardown is deferred to a workqueue on the last put. */
+void vxlan_handler_put(struct vxlan_handler *vh);
+
+/* Attach (or, with create == false, share) a handler for UDP port
+ * @portno in @net.  Returns ERR_PTR() on failure. */
+struct vxlan_handler *vxlan_handler_add(struct net *net,
+ __be16 portno, vxlan_rcv_t *rcv,
+ void *data, int priority, bool create);
+
+/* Encapsulate @skb in UDP+VXLAN headers and transmit along @rt. */
+int vxlan_xmit_skb(struct net *net, struct vxlan_handler *vh,
+ struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+ __be16 src_port, __be16 dst_port, __be32 vni);
+
+/* Pick a source port in [port_min, port_max] from the flow hash. */
+__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
+
+#endif
#include <net/net_namespace.h>
#include <net/netns/generic.h>
-#undef pernet_operations
-
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,32)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
static int net_assign_generic(struct net *net, int id, void *data);
#endif
-static int __net_init compat_init_net(struct net *net, struct rpl_pernet_operations *pnet)
+int __net_init compat_init_net(struct net *net, struct rpl_pernet_operations *pnet)
{
int err;
void *ovs_net = kzalloc(pnet->size, GFP_KERNEL);
return err;
}
-static void __net_exit compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet)
+void __net_exit compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet)
{
void *ovs_net = net_generic(net, *pnet->id);
}
#endif
-#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32)
-#define DEFINE_PNET_REG_FUNC(PNET_TYPE) \
- static struct rpl_pernet_operations *pnet_##PNET_TYPE; \
-static int __net_init compat_init_net_##PNET_TYPE(struct net *net) \
-{ \
- return compat_init_net(net, pnet_##PNET_TYPE); \
-} \
- \
-static void __net_exit compat_exit_net_##PNET_TYPE(struct net *net) \
-{ \
- compat_exit_net(net, pnet_##PNET_TYPE); \
-} \
- \
-static struct pernet_operations pnet_compat_##PNET_TYPE = { \
- .init = compat_init_net_##PNET_TYPE, \
- .exit = compat_exit_net_##PNET_TYPE, \
-}; \
- \
-int rpl_register_pernet_##PNET_TYPE(struct rpl_pernet_operations *rpl_pnet) \
-{ \
- pnet_##PNET_TYPE = rpl_pnet; \
- return register_pernet_##PNET_TYPE(pnet_##PNET_TYPE->id, &pnet_compat_##PNET_TYPE); \
-} \
- \
-void rpl_unregister_pernet_##PNET_TYPE(struct rpl_pernet_operations *pnet) \
-{ \
- unregister_pernet_##PNET_TYPE(*pnet->id, &pnet_compat_##PNET_TYPE); \
-}
-
-DEFINE_PNET_REG_FUNC(gen_device);
-
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
-#define MAX_DATA_COUNT 1
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
+#define MAX_DATA_COUNT 2
static struct net *net;
static void *__ovs_net_data[MAX_DATA_COUNT];
--- /dev/null
+/*
+ * Copyright (c) 2007-2013 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ * This code is derived from kernel vxlan module.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/rculist.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/igmp.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/hash.h>
+#include <linux/ethtool.h>
+#include <net/arp.h>
+#include <net/ndisc.h>
+#include <net/ip.h>
+#include <net/ip_tunnels.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/vxlan.h>
+
+#include "checksum.h"
+#include "compat.h"
+#include "gso.h"
+#include "vlan.h"
+
+#define PORT_HASH_BITS 8
+#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
+
+#define VXLAN_N_VID (1u << 24)
+#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
+/* IP header + UDP + VXLAN + Ethernet header */
+#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
+#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+
+#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
+
+/* VXLAN protocol header */
+struct vxlanhdr {
+ __be32 vx_flags;
+ __be32 vx_vni;
+};
+
+static int vxlan_net_id;
+
+/* per-network namespace private data for this module */
+struct vxlan_net {
+ struct hlist_head sock_list[PORT_HASH_SIZE];
+ struct mutex sock_lock; /* RTNL lock nests inside this lock. */
+};
+
+/* Socket hash table head */
+static inline struct hlist_head *vs_head(struct net *net, __be16 port)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+ return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
+}
+
+/* Find VXLAN socket based on network namespace and UDP port.
+ * NOTE(review): the walk uses the _rcu list primitive; callers appear to
+ * hold either rcu_read_lock (receive path) or vn->sock_lock — confirm. */
+static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
+{
+ struct vxlan_sock *vs;
+
+ hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
+ if (inet_sport(vs->sock->sk) == port)
+ return vs;
+ }
+ return NULL;
+}
+
+/* Callback from net/ipv4/udp.c to receive packets.
+ * Returns 0 when the skb was consumed here (delivered to a handler or
+ * freed), 1 to hand it back to the UDP stack as a non-VXLAN packet.
+ * Handlers are tried in list order (sorted by priority at insert). */
+static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct vxlan_handler *vh;
+ struct vxlan_sock *vs;
+ struct vxlanhdr *vxh;
+
+ /* Need Vxlan and inner Ethernet header to be present */
+ if (!pskb_may_pull(skb, VXLAN_HLEN))
+ goto error;
+
+ /* Return packets with reserved bits set */
+ vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
+ if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
+ (vxh->vx_vni & htonl(0xff))) {
+ pr_warn("invalid vxlan flags=%#x vni=%#x\n",
+ ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
+ goto error;
+ }
+
+ /* NOTE(review): vxh still points into the pre-pull header here; if
+ * iptunnel_pull_header() ever reallocates the skb head the later
+ * vxh->vx_vni read would be stale — confirm against the kernel impl. */
+ if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
+ goto drop;
+
+ vs = vxlan_find_port(sock_net(sk), inet_sport(sk));
+ if (!vs)
+ goto drop;
+
+ list_for_each_entry_rcu(vh, &vs->handler_list, node) {
+ if (vh->rcv(vh, skb, vxh->vx_vni) == PACKET_RCVD)
+ return 0;
+ }
+
+drop:
+ /* Consume bad packet */
+ kfree_skb(skb);
+ return 0;
+
+error:
+ /* Return non vxlan pkt */
+ return 1;
+}
+
+/* skb destructor: release the socket reference taken in vxlan_set_owner(). */
+static void vxlan_sock_put(struct sk_buff *skb)
+{
+ sock_put(skb->sk);
+}
+
+/* On transmit, associate with the tunnel socket */
+static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
+{
+ skb_orphan(skb);
+ sock_hold(sk);
+ skb->sk = sk;
+ skb->destructor = vxlan_sock_put;
+}
+
+/* Compute source port for outgoing packet
+ * first choice to use L4 flow hash since it will spread
+ * better and maybe available from hardware
+ * secondary choice is to use jhash on the Ethernet header
+ */
+__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
+{
+ unsigned int range = (port_max - port_min) + 1;
+ u32 hash;
+
+ hash = skb_get_rxhash(skb);
+ if (!hash)
+ hash = jhash(skb->data, 2 * ETH_ALEN,
+ (__force u32) skb->protocol);
+
+ /* Scale the 32-bit hash into [port_min, port_max] without modulo. */
+ return htons((((u64) hash * range) >> 32) + port_min);
+}
+
+/* Per-segment fixup installed by handle_offloads() below: rewrite the
+ * UDP length for the segment and, when the tunnel requested a UDP
+ * checksum (uh->check != 0), recompute it over the segment. */
+static void vxlan_gso(struct sk_buff *skb)
+{
+ int udp_offset = skb_transport_offset(skb);
+ struct udphdr *uh;
+
+ uh = udp_hdr(skb);
+ uh->len = htons(skb->len - udp_offset);
+
+ /* csum segment if tunnel sets skb with csum. */
+ if (unlikely(uh->check)) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - udp_offset,
+ IPPROTO_UDP, 0);
+ uh->check = csum_fold(skb_checksum(skb, udp_offset,
+ skb->len - udp_offset, 0));
+
+ /* 0 means "no checksum"; use the mangled form instead. */
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+
+ }
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
+/* Prepare @skb for transmission: register the GSO fixup for segmented
+ * skbs, otherwise normalize the checksum state.  Always returns 0. */
+static int handle_offloads(struct sk_buff *skb)
+{
+ if (skb_is_gso(skb)) {
+ OVS_GSO_CB(skb)->fix_segment = vxlan_gso;
+ } else {
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ return 0;
+}
+
+/*
+ * Push VXLAN and UDP headers in front of the inner frame in @skb and
+ * transmit it along @rt via iptunnel_xmit().  @vni is stored into
+ * vx_vni verbatim (caller supplies it in wire format).  The UDP
+ * checksum is set to 0 (not computed) on this path.
+ * Returns 0 / negative errno.
+ */
+int vxlan_xmit_skb(struct net *net, struct vxlan_handler *vh,
+ struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+ __be16 src_port, __be16 dst_port, __be32 vni)
+{
+ struct vxlanhdr *vxh;
+ struct udphdr *uh;
+ int min_headroom;
+ int err;
+
+ skb_reset_inner_headers(skb);
+
+ /* Worst-case headroom: link layer + outer IP + UDP + VXLAN, plus a
+ * VLAN tag if one must be pushed back into the packet. */
+ min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
+ + VXLAN_HLEN + sizeof(struct iphdr)
+ + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+
+ /* Need space for new headers (invalidates iph ptr) */
+ err = skb_cow_head(skb, min_headroom);
+ if (unlikely(err))
+ return err;
+
+ if (unlikely(vlan_deaccel_tag(skb)))
+ return -ENOMEM;
+
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ vxh->vx_flags = htonl(VXLAN_FLAGS);
+ vxh->vx_vni = vni;
+
+ __skb_push(skb, sizeof(*uh));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+
+ uh->dest = dst_port;
+ uh->source = src_port;
+
+ uh->len = htons(skb->len);
+ uh->check = 0;
+
+ /* Tie the skb to the tunnel socket for accounting until freed. */
+ vxlan_set_owner(vh->vs->sock->sk, skb);
+
+ err = handle_offloads(skb);
+ if (err)
+ return err;
+
+ return iptunnel_xmit(net, rt, skb, src, dst,
+ IPPROTO_UDP, tos, ttl, df);
+}
+
+/*
+ * Create a kernel UDP socket bound to INADDR_ANY:@port in @net, mark it
+ * as an encapsulation socket, and link it into the per-net hash.
+ * Returns the new vxlan_sock or an ERR_PTR().
+ * Called from vxlan_handler_add() with vn->sock_lock held.
+ */
+static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port)
+{
+ struct vxlan_sock *vs;
+ struct sock *sk;
+ struct sockaddr_in vxlan_addr = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = htonl(INADDR_ANY),
+ .sin_port = port,
+ };
+ int rc;
+
+ vs = kmalloc(sizeof(*vs), GFP_KERNEL);
+ if (!vs)
+ return ERR_PTR(-ENOMEM);
+
+ /* Create UDP socket for encapsulation receive. */
+ rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock);
+ if (rc < 0) {
+ pr_debug("UDP socket create failed\n");
+ kfree(vs);
+ return ERR_PTR(rc);
+ }
+
+ /* Put in proper namespace */
+ sk = vs->sock->sk;
+ sk_change_net(sk, net);
+
+ rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr,
+ sizeof(vxlan_addr));
+ if (rc < 0) {
+ pr_debug("bind for UDP socket %pI4:%u (%d)\n",
+ &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
+ sk_release_kernel(sk);
+ kfree(vs);
+ return ERR_PTR(rc);
+ }
+
+ /* Disable multicast loopback */
+ inet_sk(sk)->mc_loop = 0;
+ INIT_LIST_HEAD(&vs->handler_list);
+ hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
+
+ /* Mark socket as an encapsulation socket. */
+ udp_sk(sk)->encap_type = 1;
+ udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
+ udp_encap_enable();
+
+ return vs;
+}
+
+/* RCU callback: free a vxlan_sock after readers have drained. */
+static void rcu_free_vs_callback(struct rcu_head *rcu)
+{
+ struct vxlan_sock *vs = container_of(rcu, struct vxlan_sock, rcu);
+
+ kfree(vs);
+}
+
+/* Tear down @vs if no handlers remain: unhash it, release the kernel
+ * socket, and defer the kfree past an RCU grace period.
+ * NOTE(review): callers appear to hold vn->sock_lock — confirm. */
+static void vxlan_socket_del(struct vxlan_sock *vs)
+{
+ if (list_empty(&vs->handler_list)) {
+ hlist_del_rcu(&vs->hlist);
+
+ sk_release_kernel(vs->sock->sk);
+ call_rcu(&vs->rcu, rcu_free_vs_callback);
+ }
+}
+
+static int vxlan_init_module(void);
+static void vxlan_cleanup_module(void);
+
+/* RCU callback: free a vxlan_handler after readers have drained. */
+static void rcu_free_vh_callback(struct rcu_head *rcu)
+{
+ struct vxlan_handler *vh = container_of(rcu, struct vxlan_handler, rcu);
+
+ kfree(vh);
+}
+
+/* Deferred teardown queued by vxlan_handler_put() on the last reference:
+ * unlink the handler under vn->sock_lock, retire the socket if it was
+ * the last handler, then drop the module-level refcount. */
+static void vh_del_work(struct work_struct *work)
+{
+ struct vxlan_handler *vh = container_of(work, struct vxlan_handler, del_work);
+ struct vxlan_sock *vs = vh->vs;
+ struct net *net = sock_net(vs->sock->sk);
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+ mutex_lock(&vn->sock_lock);
+
+ list_del_rcu(&vh->node);
+ call_rcu(&vh->rcu, rcu_free_vh_callback);
+ vxlan_socket_del(vs);
+
+ mutex_unlock(&vn->sock_lock);
+
+ vxlan_cleanup_module();
+}
+
+/*
+ * Register a receive handler for UDP port @portno in @net, creating the
+ * underlying socket on first use.  With @create true an existing handler
+ * with the same @rcv is an error (-EEXIST); otherwise it is shared via a
+ * reference count.  Handlers are kept sorted so lower @priority values
+ * are tried first on receive.  Returns the handler or an ERR_PTR().
+ *
+ * NOTE(review): vxlan_init_module() bumps the module refcount on every
+ * call, but vxlan_cleanup_module() only runs once per handler teardown
+ * (vh_del_work), and not at all on the error or reuse paths here —
+ * verify the intended refcount balance.
+ */
+struct vxlan_handler *vxlan_handler_add(struct net *net,
+ __be16 portno, vxlan_rcv_t *rcv,
+ void *data, int priority, bool create)
+{
+ struct vxlan_net *vn;
+ struct vxlan_sock *vs;
+ struct vxlan_handler *vh;
+ struct vxlan_handler *new;
+ int err;
+
+ err = vxlan_init_module();
+ if (err)
+ return ERR_PTR(err);
+
+ vn = net_generic(net, vxlan_net_id);
+ mutex_lock(&vn->sock_lock);
+ /* Look to see if can reuse socket */
+ vs = vxlan_find_port(net, portno);
+ if (!vs) {
+ vs = vxlan_socket_create(net, portno);
+ if (IS_ERR(vs)) {
+ new = (void *) vs;
+ goto out;
+ }
+ }
+
+ /* Try existing vxlan handlers for this socket. */
+ list_for_each_entry(vh, &vs->handler_list, node) {
+ if (vh->rcv == rcv) {
+ if (create) {
+ vxlan_socket_del(vs);
+ new = ERR_PTR(-EEXIST);
+ goto out;
+ }
+ atomic_inc(&vh->refcnt);
+ new = vh;
+ goto out;
+ }
+ }
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new) {
+ vxlan_socket_del(vs);
+ new = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ new->rcv = rcv;
+ new->vs = vs;
+ atomic_set(&new->refcnt, 1);
+ INIT_WORK(&new->del_work, vh_del_work);
+ new->data = data;
+ new->priority = priority;
+
+ /* Insert before the first handler of higher priority value. */
+ list_for_each_entry(vh, &vs->handler_list, node) {
+ if (vh->priority > priority) {
+ list_add_tail_rcu(&new->node, &vh->node);
+ goto out;
+ }
+ }
+
+ list_add_tail_rcu(&new->node, &vs->handler_list);
+out:
+ mutex_unlock(&vn->sock_lock);
+ return new;
+}
+
+/* Drop a handler reference; on the last one, defer the actual teardown
+ * to vh_del_work() (sleeps under mutexes, so it cannot run here).
+ * NOTE(review): single-argument queue_work() matches the OVS compat
+ * workqueue API, not mainline's two-argument one — confirm. */
+void vxlan_handler_put(struct vxlan_handler *vh)
+{
+ BUG_ON(!vh->vs);
+
+ if (atomic_dec_and_test(&vh->refcnt))
+ queue_work(&vh->del_work);
+}
+
+/* Per-namespace init: empty socket hash and its guarding mutex. */
+static __net_init int vxlan_init_net(struct net *net)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ unsigned int h;
+
+ mutex_init(&vn->sock_lock);
+
+ for (h = 0; h < PORT_HASH_SIZE; ++h)
+ INIT_HLIST_HEAD(&vn->sock_list[h]);
+
+ return 0;
+}
+
+/* NOTE(review): no .exit hook — presumably nothing per-net outlives the
+ * handlers, but verify namespaces can be torn down cleanly. */
+static struct pernet_operations vxlan_net_ops = {
+ .init = vxlan_init_net,
+ .id = &vxlan_net_id,
+ .size = sizeof(struct vxlan_net),
+};
+
+/* Module-level registration refcount, guarded by init_lock. */
+static int refcnt;
+static DEFINE_MUTEX(init_lock);
+DEFINE_COMPAT_PNET_REG_FUNC(device);
+
+/* First caller registers the pernet ops; later callers only bump the
+ * refcount.  Returns 0 or the registration error. */
+static int vxlan_init_module(void)
+{
+ int err = 0;
+
+ mutex_lock(&init_lock);
+ if (refcnt)
+ goto out;
+ err = register_pernet_device(&vxlan_net_ops);
+out:
+ if (!err)
+ refcnt++;
+ mutex_unlock(&init_lock);
+ return err;
+}
+
+/* Drop one refcount; the last one unregisters the pernet ops. */
+static void vxlan_cleanup_module(void)
+{
+ mutex_lock(&init_lock);
+ refcnt--;
+ if (refcnt)
+ goto out;
+ unregister_pernet_device(&vxlan_net_ops);
+out:
+ mutex_unlock(&init_lock);
+}
#include <net/icmp.h>
#include <net/ip.h>
#include <net/udp.h>
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/vxlan.h>
#include "datapath.h"
#include "tunnel.h"
#include "vport.h"
-#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
-
-/**
- * struct vxlanhdr - VXLAN header
- * @vx_flags: Must have the exact value %VXLAN_FLAGS.
- * @vx_vni: VXLAN Network Identifier (VNI) in top 24 bits, low 8 bits zeroed.
- */
-struct vxlanhdr {
- __be32 vx_flags;
- __be32 vx_vni;
-};
-
-#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+#define OVS_VXLAN_RCV_PRIORITY 8
/**
* struct vxlan_port - Keeps track of open UDP ports
- * @dst_port: vxlan UDP port no.
- * @list: list element in @vxlan_ports.
- * @vxlan_rcv_socket: The socket created for this port number.
+ * @vh: vxlan_handler created for the port.
* @name: vport name.
*/
struct vxlan_port {
- __be16 dst_port;
- struct list_head list;
- struct socket *vxlan_rcv_socket;
+ struct vxlan_handler *vh;
char name[IFNAMSIZ];
};
-static LIST_HEAD(vxlan_ports);
-
static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
{
return vport_priv(vport);
}
-static struct vxlan_port *vxlan_find_port(struct net *net, __be16 port)
-{
- struct vxlan_port *vxlan_port;
-
- list_for_each_entry_rcu(vxlan_port, &vxlan_ports, list) {
-
- if (vxlan_port->dst_port == port &&
- net_eq(sock_net(vxlan_port->vxlan_rcv_socket->sk), net))
- return vxlan_port;
- }
-
- return NULL;
-}
-
-static inline struct vxlanhdr *vxlan_hdr(const struct sk_buff *skb)
-{
- return (struct vxlanhdr *)(udp_hdr(skb) + 1);
-}
-
-static void vxlan_build_header(const struct vport *vport,
- struct sk_buff *skb,
- int tunnel_hlen)
-{
- struct vxlan_port *vxlan_port = vxlan_vport(vport);
- struct udphdr *udph = udp_hdr(skb);
- struct vxlanhdr *vxh = (struct vxlanhdr *)(udph + 1);
- const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
-
- udph->dest = vxlan_port->dst_port;
- udph->source = htons(ovs_tnl_get_src_port(skb));
- udph->check = 0;
- udph->len = htons(skb->len - skb_transport_offset(skb));
-
- vxh->vx_flags = htonl(VXLAN_FLAGS);
- vxh->vx_vni = htonl(be64_to_cpu(tun_key->tun_id) << 8);
-}
-
/* Called with rcu_read_lock and BH disabled. */
-static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
+static int vxlan_rcv(struct vxlan_handler *vh, struct sk_buff *skb, __be32 vx_vni)
{
- struct vxlan_port *vxlan_vport;
- struct vxlanhdr *vxh;
+ struct vport *vport = vh->data;
struct iphdr *iph;
struct ovs_key_ipv4_tunnel tun_key;
__be64 key;
- vxlan_vport = vxlan_find_port(dev_net(skb->dev), udp_hdr(skb)->dest);
- if (unlikely(!vxlan_vport))
- goto error;
-
- if (unlikely(!pskb_may_pull(skb, VXLAN_HLEN + ETH_HLEN)))
- goto error;
-
- vxh = vxlan_hdr(skb);
- if (unlikely(vxh->vx_flags != htonl(VXLAN_FLAGS) ||
- vxh->vx_vni & htonl(0xff)))
- goto error;
-
- skb_pull_rcsum(skb, VXLAN_HLEN);
-
- key = cpu_to_be64(ntohl(vxh->vx_vni) >> 8);
-
/* Save outer tunnel values */
iph = ip_hdr(skb);
+ key = cpu_to_be64(ntohl(vx_vni) >> 8);
tnl_tun_key_init(&tun_key, iph, key, TUNNEL_KEY);
- ovs_tnl_rcv(vport_from_priv(vxlan_vport), skb, &tun_key);
- goto out;
-
-error:
- kfree_skb(skb);
-out:
- return 0;
-}
-
-/* Random value. Irrelevant as long as it's not 0 since we set the handler. */
-#define UDP_ENCAP_VXLAN 1
-static int vxlan_socket_init(struct vxlan_port *vxlan_port, struct net *net)
-{
- struct sockaddr_in sin;
- int err;
-
- err = sock_create_kern(AF_INET, SOCK_DGRAM, 0,
- &vxlan_port->vxlan_rcv_socket);
- if (err)
- goto error;
-
- /* release net ref. */
- sk_change_net(vxlan_port->vxlan_rcv_socket->sk, net);
-
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = htonl(INADDR_ANY);
- sin.sin_port = vxlan_port->dst_port;
-
- err = kernel_bind(vxlan_port->vxlan_rcv_socket, (struct sockaddr *)&sin,
- sizeof(struct sockaddr_in));
- if (err)
- goto error_sock;
-
- udp_sk(vxlan_port->vxlan_rcv_socket->sk)->encap_type = UDP_ENCAP_VXLAN;
- udp_sk(vxlan_port->vxlan_rcv_socket->sk)->encap_rcv = vxlan_rcv;
-
- udp_encap_enable();
-
- return 0;
-
-error_sock:
- sk_release_kernel(vxlan_port->vxlan_rcv_socket->sk);
-error:
- pr_warn("cannot register vxlan protocol handler\n");
- return err;
+ ovs_vport_receive(vport, skb, &tun_key);
+ return PACKET_RCVD;
}
static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
{
struct vxlan_port *vxlan_port = vxlan_vport(vport);
+ __be16 dst_port = inet_sport(vxlan_port->vh->vs->sock->sk);
- if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(vxlan_port->dst_port)))
+ if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
return -EMSGSIZE;
return 0;
}
{
struct vxlan_port *vxlan_port = vxlan_vport(vport);
- list_del_rcu(&vxlan_port->list);
- /* Release socket */
- sk_release_kernel(vxlan_port->vxlan_rcv_socket->sk);
+ vxlan_handler_put(vxlan_port->vh);
ovs_vport_deferred_free(vport);
}
struct net *net = ovs_dp_get_net(parms->dp);
struct nlattr *options = parms->options;
struct vxlan_port *vxlan_port;
+ struct vxlan_handler *vh;
struct vport *vport;
struct nlattr *a;
- int err;
u16 dst_port;
+ int err;
if (!options) {
err = -EINVAL;
goto error;
}
- /* Verify if we already have a socket created for this port */
- if (vxlan_find_port(net, htons(dst_port))) {
- err = -EEXIST;
- goto error;
- }
-
vport = ovs_vport_alloc(sizeof(struct vxlan_port),
&ovs_vxlan_vport_ops, parms);
if (IS_ERR(vport))
return vport;
vxlan_port = vxlan_vport(vport);
- vxlan_port->dst_port = htons(dst_port);
strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
- err = vxlan_socket_init(vxlan_port, net);
- if (err)
- goto error_free;
+ vh = vxlan_handler_add(net, htons(dst_port), vxlan_rcv,
+ vport, OVS_VXLAN_RCV_PRIORITY, true);
+ if (IS_ERR(vh)) {
+ ovs_vport_free(vport);
+ return (void *)vh;
+ }
+ vxlan_port->vh = vh;
- list_add_tail_rcu(&vxlan_port->list, &vxlan_ports);
return vport;
-error_free:
- ovs_vport_free(vport);
error:
return ERR_PTR(err);
}
static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
{
- if (unlikely(!OVS_CB(skb)->tun_key))
- return -EINVAL;
+ struct vxlan_port *vxlan_port = vxlan_vport(vport);
+ __be16 dst_port = inet_sport(vxlan_port->vh->vs->sock->sk);
+ struct net *net = ovs_dp_get_net(vport->dp);
+ struct rtable *rt;
+ __be16 src_port;
+ __be32 saddr;
+ __be16 df;
+ int port_min;
+ int port_max;
+ int err;
+
+ if (unlikely(!OVS_CB(skb)->tun_key)) {
+ err = -EINVAL;
+ goto error;
+ }
- return ovs_tnl_send(vport, skb, IPPROTO_UDP,
- VXLAN_HLEN, vxlan_build_header);
+ forward_ip_summed(skb, true);
+
+ /* Route lookup */
+ saddr = OVS_CB(skb)->tun_key->ipv4_src;
+ rt = find_route(ovs_dp_get_net(vport->dp),
+ &saddr,
+ OVS_CB(skb)->tun_key->ipv4_dst,
+ IPPROTO_UDP,
+ OVS_CB(skb)->tun_key->ipv4_tos,
+ skb_get_mark(skb));
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ goto error;
+ }
+
+ df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
+ htons(IP_DF) : 0;
+
+ skb->local_df = 1;
+
+ inet_get_local_port_range(&port_min, &port_max);
+ src_port = vxlan_src_port(port_min, port_max, skb);
+
+ err = vxlan_xmit_skb(net, vxlan_port->vh, rt, skb,
+ saddr, OVS_CB(skb)->tun_key->ipv4_dst,
+ OVS_CB(skb)->tun_key->ipv4_tos,
+ OVS_CB(skb)->tun_key->ipv4_ttl, df,
+ src_port, dst_port,
+ htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8));
+ if (err < 0)
+ ip_rt_put(rt);
+error:
+ return err;
}
static const char *vxlan_get_name(const struct vport *vport)
int pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *mutex)
OVS_MUST_HOLD(mutex);
-/* Sparse complains about the proper PTHREAD_MUTEX_INITIALIZER definition.
+/* Sparse complains about the proper PTHREAD_*_INITIALIZER definitions.
* Luckily, it's not a real compiler so we can overwrite it with something
* simple. */
#undef PTHREAD_MUTEX_INITIALIZER
#define PTHREAD_MUTEX_INITIALIZER {}
+#undef PTHREAD_RWLOCK_INITIALIZER
+#define PTHREAD_RWLOCK_INITIALIZER {}
+
+#undef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+#define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP {}
+
+#undef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
+#define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP {}
+
#define pthread_mutex_trylock(MUTEX) \
({ \
int retval = pthread_mutex_trylock(mutex); \
{
struct bond *bond = CONST_CAST(struct bond *, bond_);
- ovs_assert(bond->ref_cnt > 0);
- bond->ref_cnt++;
+ if (bond) {
+ ovs_assert(bond->ref_cnt > 0);
+ bond->ref_cnt++;
+ }
return bond;
}
#define CLS_CURSOR_FOR_EACH(RULE, MEMBER, CURSOR) \
for (ASSIGN_CONTAINER(RULE, cls_cursor_first(CURSOR), MEMBER); \
- &(RULE)->MEMBER != NULL; \
+ RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER); \
ASSIGN_CONTAINER(RULE, cls_cursor_next(CURSOR, &(RULE)->MEMBER), \
MEMBER))
#define CLS_CURSOR_FOR_EACH_SAFE(RULE, NEXT, MEMBER, CURSOR) \
for (ASSIGN_CONTAINER(RULE, cls_cursor_first(CURSOR), MEMBER); \
- (&(RULE)->MEMBER != NULL \
+ (RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER) \
? ASSIGN_CONTAINER(NEXT, cls_cursor_next(CURSOR, &(RULE)->MEMBER), \
- MEMBER) \
+ MEMBER), 1 \
: 0); \
(RULE) = (NEXT))
#line 2 "@srcdir@/lib/dirs.c.in"
/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <config.h>
#include "dirs.h"
#include <stdlib.h>
+#include "ovs-thread.h"
#include "util.h"
struct directory {
const char *value; /* Actual value; NULL if not yet determined. */
const char *default_value; /* Default value. */
const char *var_name; /* Environment variable to override default. */
+ struct ovsthread_once once; /* Ensures 'value' gets initialized once. */
};
static const char *
get_dir(struct directory *d)
{
- if (!d->value) {
+ if (ovsthread_once_start(&d->once)) {
d->value = getenv(d->var_name);
if (!d->value || !d->value[0]) {
d->value = d->default_value;
}
+ ovsthread_once_done(&d->once);
}
return d->value;
}
const char *
ovs_sysconfdir(void)
{
- static struct directory d = { NULL, @sysconfdir@, "OVS_SYSCONFDIR" };
+ static struct directory d = {
+ NULL, @sysconfdir@, "OVS_SYSCONFDIR", OVSTHREAD_ONCE_INITIALIZER
+ };
+
return get_dir(&d);
}
const char *
ovs_pkgdatadir(void)
{
- static struct directory d = { NULL, @pkgdatadir@, "OVS_PKGDATADIR" };
+ static struct directory d = {
+ NULL, @pkgdatadir@, "OVS_PKGDATADIR", OVSTHREAD_ONCE_INITIALIZER
+ };
+
return get_dir(&d);
}
const char *
ovs_rundir(void)
{
- static struct directory d = { NULL, @RUNDIR@, "OVS_RUNDIR" };
+ static struct directory d = {
+ NULL, @RUNDIR@, "OVS_RUNDIR", OVSTHREAD_ONCE_INITIALIZER
+ };
+
return get_dir(&d);
}
const char *
ovs_logdir(void)
{
- static struct directory d = { NULL, @LOGDIR@, "OVS_LOGDIR" };
+ static struct directory d = {
+ NULL, @LOGDIR@, "OVS_LOGDIR", OVSTHREAD_ONCE_INITIALIZER
+ };
+
return get_dir(&d);
}
const char *
ovs_dbdir(void)
{
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
static const char *dbdir;
- if (!dbdir) {
+
+ if (ovsthread_once_start(&once)) {
dbdir = getenv("OVS_DBDIR");
if (!dbdir || !dbdir[0]) {
char *sysconfdir = getenv("OVS_SYSCONFDIR");
? xasprintf("%s/openvswitch", sysconfdir)
: @DBDIR@);
}
+ ovsthread_once_done(&once);
}
return dbdir;
}
const char *
ovs_bindir(void)
{
- static struct directory d = { NULL, @bindir@, "OVS_BINDIR" };
+ static struct directory d = {
+ NULL, @bindir@, "OVS_BINDIR", OVSTHREAD_ONCE_INITIALIZER
+ };
+
return get_dir(&d);
}
static int
dpif_linux_init(void)
{
- static int error = -1;
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
+ static int error;
- if (error < 0) {
+ if (ovsthread_once_start(&once)) {
unsigned int ovs_vport_mcgroup;
error = nl_lookup_genl_family(OVS_DATAPATH_FAMILY,
nln = nln_create(NETLINK_GENERIC, ovs_vport_mcgroup,
dpif_linux_nln_parse, &vport);
}
+
+ ovsthread_once_done(&once);
}
return error;
char *name;
int open_cnt;
bool destroyed;
+ int max_mtu; /* Maximum MTU of any port added so far. */
struct dp_netdev_queue queues[N_QUEUES];
struct hmap flow_table; /* Flow table. */
/* All netdev-based datapaths. */
static struct shash dp_netdevs = SHASH_INITIALIZER(&dp_netdevs);
-/* Maximum port MTU seen so far. */
-static int max_mtu = ETH_PAYLOAD_MAX;
-
static int get_port_by_number(struct dp_netdev *, odp_port_t port_no,
struct dp_netdev_port **portp);
static int get_port_by_name(struct dp_netdev *, const char *devname,
dp->class = class;
dp->name = xstrdup(name);
dp->open_cnt = 0;
+ dp->max_mtu = ETH_PAYLOAD_MAX;
for (i = 0; i < N_QUEUES; i++) {
dp->queues[i].head = dp->queues[i].tail = 0;
}
port->type = xstrdup(type);
error = netdev_get_mtu(netdev, &mtu);
- if (!error && mtu > max_mtu) {
- max_mtu = mtu;
+ if (!error && mtu > dp->max_mtu) {
+ dp->max_mtu = mtu;
}
list_push_back(&dp->port_list, &port->node);
struct dp_netdev_port *port;
struct ofpbuf packet;
- ofpbuf_init(&packet, DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + max_mtu);
+ ofpbuf_init(&packet,
+ DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + dp->max_mtu);
LIST_FOR_EACH (port, node, &dp->port_list) {
int error;
static void
dp_initialize(void)
{
- static int status = -1;
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
- if (status < 0) {
+ if (ovsthread_once_start(&once)) {
int i;
- status = 0;
for (i = 0; i < ARRAY_SIZE(base_dpif_classes); i++) {
dp_register_provider(base_dpif_classes[i]);
}
+ ovsthread_once_done(&once);
}
}
/*
- * Copyright (c) 2012 Nicira, Inc.
+ * Copyright (c) 2012, 2013 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#define HEAP_FOR_EACH(NODE, MEMBER, HEAP) \
for (((HEAP)->n > 0 \
? ASSIGN_CONTAINER(NODE, (HEAP)->array[1], MEMBER) \
- : ((NODE) = NULL, 1)); \
+ : ((NODE) = NULL, (void) 0)); \
(NODE) != NULL; \
((NODE)->MEMBER.idx < (HEAP)->n \
? ASSIGN_CONTAINER(NODE, \
(HEAP)->array[(NODE)->MEMBER.idx + 1], \
MEMBER) \
- : ((NODE) = NULL, 1)))
+ : ((NODE) = NULL, (void) 0)))
\f
/* Returns the index of the node that is the parent of the node with the given
* 'idx' within a heap. */
*/
#define HINDEX_FOR_EACH_WITH_HASH(NODE, MEMBER, HASH, HINDEX) \
for (ASSIGN_CONTAINER(NODE, hindex_node_with_hash(HINDEX, HASH), MEMBER); \
- &(NODE)->MEMBER != NULL; \
+ NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
ASSIGN_CONTAINER(NODE, (NODE)->MEMBER.s, MEMBER))
struct hindex_node *hindex_node_with_hash(const struct hindex *, size_t hash);
/* Iterates through every node in HINDEX. */
#define HINDEX_FOR_EACH(NODE, MEMBER, HINDEX) \
for (ASSIGN_CONTAINER(NODE, hindex_first(HINDEX), MEMBER); \
- &(NODE)->MEMBER != NULL; \
+ NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
ASSIGN_CONTAINER(NODE, hindex_next(HINDEX, &(NODE)->MEMBER), MEMBER))
/* Safe when NODE may be freed (not needed when NODE may be removed from the
* hash index but its members remain accessible and intact). */
#define HINDEX_FOR_EACH_SAFE(NODE, NEXT, MEMBER, HINDEX) \
for (ASSIGN_CONTAINER(NODE, hindex_first(HINDEX), MEMBER); \
- (&(NODE)->MEMBER != NULL \
- ? ASSIGN_CONTAINER(NEXT, hindex_next(HINDEX, &(NODE)->MEMBER), MEMBER) \
+ (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER) \
+ ? ASSIGN_CONTAINER(NEXT, hindex_next(HINDEX, &(NODE)->MEMBER), MEMBER), 1 \
: 0); \
(NODE) = (NEXT))
*/
#define HMAP_FOR_EACH_WITH_HASH(NODE, MEMBER, HASH, HMAP) \
for (ASSIGN_CONTAINER(NODE, hmap_first_with_hash(HMAP, HASH), MEMBER); \
- &(NODE)->MEMBER != NULL; \
+ NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
ASSIGN_CONTAINER(NODE, hmap_next_with_hash(&(NODE)->MEMBER), \
MEMBER))
#define HMAP_FOR_EACH_IN_BUCKET(NODE, MEMBER, HASH, HMAP) \
for (ASSIGN_CONTAINER(NODE, hmap_first_in_bucket(HMAP, HASH), MEMBER); \
- &(NODE)->MEMBER != NULL; \
+ NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
ASSIGN_CONTAINER(NODE, hmap_next_in_bucket(&(NODE)->MEMBER), MEMBER))
static inline struct hmap_node *hmap_first_with_hash(const struct hmap *,
/* Iterates through every node in HMAP. */
#define HMAP_FOR_EACH(NODE, MEMBER, HMAP) \
for (ASSIGN_CONTAINER(NODE, hmap_first(HMAP), MEMBER); \
- &(NODE)->MEMBER != NULL; \
+ NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
ASSIGN_CONTAINER(NODE, hmap_next(HMAP, &(NODE)->MEMBER), MEMBER))
/* Safe when NODE may be freed (not needed when NODE may be removed from the
* hash map but its members remain accessible and intact). */
#define HMAP_FOR_EACH_SAFE(NODE, NEXT, MEMBER, HMAP) \
for (ASSIGN_CONTAINER(NODE, hmap_first(HMAP), MEMBER); \
- (&(NODE)->MEMBER != NULL \
- ? ASSIGN_CONTAINER(NEXT, hmap_next(HMAP, &(NODE)->MEMBER), MEMBER) \
+ (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER) \
+ ? ASSIGN_CONTAINER(NEXT, hmap_next(HMAP, &(NODE)->MEMBER), MEMBER), 1 \
: 0); \
(NODE) = (NEXT))
/* Continues an iteration from just after NODE. */
#define HMAP_FOR_EACH_CONTINUE(NODE, MEMBER, HMAP) \
for (ASSIGN_CONTAINER(NODE, hmap_next(HMAP, &(NODE)->MEMBER), MEMBER); \
- &(NODE)->MEMBER != NULL; \
+ NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
ASSIGN_CONTAINER(NODE, hmap_next(HMAP, &(NODE)->MEMBER), MEMBER))
static inline struct hmap_node *hmap_first(const struct hmap *);
#include "json.h"
#include "list.h"
#include "ofpbuf.h"
+#include "ovs-thread.h"
#include "poll-loop.h"
#include "reconnect.h"
#include "stream.h"
static struct json *
jsonrpc_create_id(void)
{
- static unsigned int id;
- return json_integer_create(id++);
+ static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
+ static unsigned int next_id;
+ unsigned int id;
+
+ xpthread_mutex_lock(&mutex);
+ id = next_id++;
+ xpthread_mutex_unlock(&mutex);
+
+ return json_integer_create(id);
}
struct jsonrpc_msg *
for (ASSIGN_CONTAINER(ITER, (ITER)->MEMBER.prev, MEMBER); \
&(ITER)->MEMBER != (LIST); \
ASSIGN_CONTAINER(ITER, (ITER)->MEMBER.prev, MEMBER))
-#define LIST_FOR_EACH_SAFE(ITER, NEXT, MEMBER, LIST) \
- for (ASSIGN_CONTAINER(ITER, (LIST)->next, MEMBER); \
- (&(ITER)->MEMBER != (LIST) \
- ? ASSIGN_CONTAINER(NEXT, (ITER)->MEMBER.next, MEMBER) \
- : 0); \
+#define LIST_FOR_EACH_SAFE(ITER, NEXT, MEMBER, LIST) \
+ for (ASSIGN_CONTAINER(ITER, (LIST)->next, MEMBER); \
+ (&(ITER)->MEMBER != (LIST) \
+ ? ASSIGN_CONTAINER(NEXT, (ITER)->MEMBER.next, MEMBER), 1 \
+ : 0); \
(ITER) = (NEXT))
#endif /* list.h */
#include "coverage.h"
#include "hash.h"
#include "hmap.h"
+#include "ovs-thread.h"
#include "timeval.h"
#include "util.h"
#include "vlog.h"
* once. */
static struct hmap lock_table = HMAP_INITIALIZER(&lock_table);
+/* Protects 'lock_table'. */
+static pthread_mutex_t lock_table_mutex = PTHREAD_MUTEX_INITIALIZER;
+
static void lockfile_unhash(struct lockfile *);
static int lockfile_try_lock(const char *name, pid_t *pidp,
struct lockfile **lockfilep);
lock_name = lockfile_name(file);
+ xpthread_mutex_lock(&lock_table_mutex);
error = lockfile_try_lock(lock_name, &pid, lockfilep);
+ xpthread_mutex_unlock(&lock_table_mutex);
if (error) {
COVERAGE_INC(lockfile_error);
lockfile_unlock(struct lockfile *lockfile)
{
if (lockfile) {
- COVERAGE_INC(lockfile_unlock);
+ xpthread_mutex_lock(&lock_table_mutex);
lockfile_unhash(lockfile);
+ xpthread_mutex_unlock(&lock_table_mutex);
+
+ COVERAGE_INC(lockfile_unlock);
free(lockfile->name);
free(lockfile);
}
/* Runs the memory monitor.
*
- * The client should call memory_should_report() afterward. */
+ * The client should call memory_should_report() afterward.
+ *
+ * This function, and the remainder of this module's interface, should be
+ * called from only a single thread. */
void
memory_run(void)
{
#include "dynamic-string.h"
#include "ofp-errors.h"
#include "ofp-util.h"
+#include "ovs-thread.h"
#include "packets.h"
#include "random.h"
#include "shash.h"
};
/* Contains 'struct nxm_field's. */
-static struct hmap all_fields = HMAP_INITIALIZER(&all_fields);
+static struct hmap all_fields;
+
+/* Maps from an mf_field's 'name' or 'extra_name' to the mf_field. */
+static struct shash mf_by_name;
/* Rate limit for parse errors. These always indicate a bug in an OpenFlow
* controller and so there's not much point in showing a lot of them. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
const struct mf_field *mf_from_nxm_header__(uint32_t header);
+static void nxm_init(void);
/* Returns the field with the given 'id'. */
const struct mf_field *
const struct mf_field *
mf_from_name(const char *name)
{
- static struct shash mf_by_name = SHASH_INITIALIZER(&mf_by_name);
-
- if (shash_is_empty(&mf_by_name)) {
- const struct mf_field *mf;
-
- for (mf = mf_fields; mf < &mf_fields[MFF_N_IDS]; mf++) {
- shash_add_once(&mf_by_name, mf->name, mf);
- if (mf->extra_name) {
- shash_add_once(&mf_by_name, mf->extra_name, mf);
- }
- }
- }
-
+ nxm_init();
return shash_find_data(&mf_by_name, name);
}
}
static void
-nxm_init(void)
+nxm_do_init(void)
{
const struct mf_field *mf;
+ hmap_init(&all_fields);
+ shash_init(&mf_by_name);
for (mf = mf_fields; mf < &mf_fields[MFF_N_IDS]; mf++) {
nxm_init_add_field(mf, mf->nxm_header);
if (mf->oxm_header != mf->nxm_header) {
nxm_init_add_field(mf, mf->oxm_header);
}
+
+ shash_add_once(&mf_by_name, mf->name, mf);
+ if (mf->extra_name) {
+ shash_add_once(&mf_by_name, mf->extra_name, mf);
+ }
}
}
+static void
+nxm_init(void)
+{
+ static pthread_once_t once = PTHREAD_ONCE_INIT;
+ pthread_once(&once, nxm_do_init);
+}
+
const struct mf_field *
mf_from_nxm_header(uint32_t header)
{
- if (hmap_is_empty(&all_fields)) {
- nxm_init();
- }
+ nxm_init();
return mf_from_nxm_header__(header);
}
format_odp_sample_action(struct ds *ds, const struct nlattr *attr)
{
static const struct nl_policy ovs_sample_policy[] = {
- [OVS_SAMPLE_ATTR_PROBABILITY] = { .type = NL_A_U32 },
- [OVS_SAMPLE_ATTR_ACTIONS] = { .type = NL_A_NESTED }
+ { NL_A_NO_ATTR, 0, 0, false }, /* OVS_SAMPLE_ATTR_UNSPEC */
+ { NL_A_U32, 0, 0, false }, /* OVS_SAMPLE_ATTR_PROBABILITY */
+ { NL_A_NESTED, 0, 0, false }, /* OVS_SAMPLE_ATTR_ACTIONS */
};
struct nlattr *a[ARRAY_SIZE(ovs_sample_policy)];
double percentage;
format_odp_userspace_action(struct ds *ds, const struct nlattr *attr)
{
static const struct nl_policy ovs_userspace_policy[] = {
- [OVS_USERSPACE_ATTR_PID] = { .type = NL_A_U32 },
- [OVS_USERSPACE_ATTR_USERDATA] = { .type = NL_A_UNSPEC,
- .optional = true },
+ { NL_A_NO_ATTR, 0, 0, false }, /* OVS_USERSPACE_ATTR_UNSPEC */
+ { NL_A_U32, 0, 0, false }, /* OVS_USERSPACE_ATTR_PID */
+ { NL_A_UNSPEC, 0, 0, true }, /* OVS_USERSPACE_ATTR_USERDATA */
};
struct nlattr *a[ARRAY_SIZE(ovs_userspace_policy)];
const struct nlattr *userdata_attr;
#include "ofpbuf.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
+#include "ovs-thread.h"
#include "vlog.h"
VLOG_DEFINE_THIS_MODULE(ofp_msgs);
static void
ofpmsgs_init(void)
{
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
const struct raw_info *info;
- if (raw_instance_map.buckets) {
+ if (!ovsthread_once_start(&once)) {
return;
}
ofphdrs_hash(&inst->hdrs));
}
}
+
+ ovsthread_once_done(&once);
}
#include "dynamic-string.h"
#include "hash.h"
+#include "ovs-thread.h"
#include "ovsdb-error.h"
#include "ovsdb-parser.h"
#include "json.h"
ovsdb_atom_default(enum ovsdb_atomic_type type)
{
static union ovsdb_atom default_atoms[OVSDB_N_TYPES];
- static bool inited;
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
- if (!inited) {
+ if (ovsthread_once_start(&once)) {
int i;
for (i = 0; i < OVSDB_N_TYPES; i++) {
ovsdb_atom_init_default(&default_atoms[i], i);
}
}
- inited = true;
+ ovsthread_once_done(&once);
}
ovs_assert(ovsdb_atomic_type_is_valid(type));
/* Iteration macros. */
#define SSET_FOR_EACH(NAME, SSET) \
for ((NAME) = SSET_FIRST(SSET); \
- SSET_NODE_FROM_NAME(NAME) != NULL; \
+ NAME != NULL; \
(NAME) = SSET_NEXT(SSET, NAME))
#define SSET_FOR_EACH_SAFE(NAME, NEXT, SSET) \
for ((NAME) = SSET_FIRST(SSET); \
- (SSET_NODE_FROM_NAME(NAME) != NULL \
+ (NAME != NULL \
? (NEXT) = SSET_NEXT(SSET, NAME), true \
: false); \
(NAME) = (NEXT))
#define SSET_NODE_FROM_HMAP_NODE(HMAP_NODE) \
CONTAINER_OF(HMAP_NODE, struct sset_node, hmap_node)
#define SSET_NAME_FROM_HMAP_NODE(HMAP_NODE) \
- (CONST_CAST(const char *, (SSET_NODE_FROM_HMAP_NODE(HMAP_NODE)->name)))
+ HMAP_NODE == NULL \
+ ? NULL \
+ : (CONST_CAST(const char *, (SSET_NODE_FROM_HMAP_NODE(HMAP_NODE)->name)))
#define SSET_NODE_FROM_NAME(NAME) CONTAINER_OF(NAME, struct sset_node, name)
#define SSET_FIRST(SSET) SSET_NAME_FROM_HMAP_NODE(hmap_first(&(SSET)->map))
#define SSET_NEXT(SSET, NAME) \
* that that OBJECT points to, assigns the address of the outer object to
* OBJECT, which must be an lvalue.
*
- * Evaluates to 1. */
+ * Evaluates to (void) 0 as the result is not to be used. */
#define ASSIGN_CONTAINER(OBJECT, POINTER, MEMBER) \
- ((OBJECT) = OBJECT_CONTAINING(POINTER, OBJECT, MEMBER), 1)
+ ((OBJECT) = OBJECT_CONTAINING(POINTER, OBJECT, MEMBER), (void) 0)
#ifdef __cplusplus
extern "C" {
* it did not arrive on a "real" port. 'ofpp_none_bundle' exists for
* when an input bundle is needed for validation (e.g., mirroring or
* OFPP_NORMAL processing). It is not connected to an 'ofproto' or have
- * any 'port' structs, so care must be taken when dealing with it. */
-static struct xbundle ofpp_none_bundle = {
- .name = "OFPP_NONE",
- .vlan_mode = PORT_VLAN_TRUNK
-};
+ * any 'port' structs, so care must be taken when dealing with it.
+ * The bundle's name and vlan mode are initialized in lookup_input_bundle(). */
+static struct xbundle ofpp_none_bundle;
static struct hmap xbridges = HMAP_INITIALIZER(&xbridges);
static struct hmap xbundles = HMAP_INITIALIZER(&xbundles);
/* Special-case OFPP_NONE, which a controller may use as the ingress
* port for traffic that it is sourcing. */
if (in_port == OFPP_NONE) {
+ ofpp_none_bundle.name = "OFPP_NONE";
+ ofpp_none_bundle.vlan_mode = PORT_VLAN_TRUNK;
return &ofpp_none_bundle;
}
/* Format for table output. */
static struct table_style table_style = TABLE_STYLE_DEFAULT;
-static const struct ovsdb_client_command all_commands[];
+static const struct ovsdb_client_command *get_all_commands(void);
static void usage(void) NO_RETURN;
static void parse_options(int argc, char *argv[]);
ovs_fatal(0, "missing command name; use --help for help");
}
- for (command = all_commands; ; command++) {
+ for (command = get_all_commands(); ; command++) {
if (!command->name) {
VLOG_FATAL("unknown command '%s'; use --help for help",
argv[optind]);
{ NULL, 0, 0, 0, NULL },
};
+
+static const struct ovsdb_client_command *get_all_commands(void)
+{
+ return all_commands;
+}
/* -m, --more: Verbosity level for "show-log" command output. */
static int show_log_verbosity;
-static const struct command all_commands[];
+static const struct command *get_all_commands(void);
static void usage(void) NO_RETURN;
static void parse_options(int argc, char *argv[]);
set_program_name(argv[0]);
parse_options(argc, argv);
signal(SIGPIPE, SIG_IGN);
- run_command(argc - optind, argv + optind, all_commands);
+ run_command(argc - optind, argv + optind, get_all_commands());
return 0;
}
{ "help", 0, INT_MAX, do_help },
{ NULL, 0, 0, NULL },
};
+
+static const struct command *get_all_commands(void)
+{
+ return all_commands;
+}
static double timeout;
-static const struct command all_commands[];
+static const struct command *get_all_commands(void);
static void parse_options(int argc, char *argv[]);
static void usage(void);
set_program_name(argv[0]);
vlog_set_levels(NULL, VLF_ANY_FACILITY, VLL_EMER);
parse_options(argc, argv);
- run_command(argc - optind, argv + optind, all_commands);
+ run_command(argc - optind, argv + optind, get_all_commands());
return 0;
}
{ "help", 0, 0, cmd_help },
{ NULL, 0, 0, NULL },
};
+
+static const struct command *get_all_commands(void)
+{
+ return all_commands;
+}
* the option itself. */
static int verbosity;
-static const struct command all_commands[];
+static const struct command *get_all_commands(void);
static void usage(void) NO_RETURN;
static void parse_options(int argc, char *argv[]);
set_program_name(argv[0]);
parse_options(argc, argv);
signal(SIGPIPE, SIG_IGN);
- run_command(argc - optind, argv + optind, all_commands);
+ run_command(argc - optind, argv + optind, get_all_commands());
return 0;
}
{ NULL, 0, 0, NULL },
};
+
+static const struct command *get_all_commands(void)
+{
+ return all_commands;
+}
static struct sort_criterion *criteria;
static size_t n_criteria, allocated_criteria;
-static const struct command all_commands[];
+static const struct command *get_all_commands(void);
static void usage(void) NO_RETURN;
static void parse_options(int argc, char *argv[]);
set_program_name(argv[0]);
parse_options(argc, argv);
signal(SIGPIPE, SIG_IGN);
- run_command(argc - optind, argv + optind, all_commands);
+ run_command(argc - optind, argv + optind, get_all_commands());
return 0;
}
{ NULL, 0, 0, NULL },
};
+
+static const struct command *get_all_commands(void)
+{
+ return all_commands;
+}
static struct table_style table_style = TABLE_STYLE_DEFAULT;
/* All supported commands. */
-static const struct vsctl_command_syntax all_commands[];
+static const struct vsctl_command_syntax *get_all_commands(void);
/* The IDL we're using and the current transaction, if any.
* This is for use by vsctl_exit() only, to allow it to clean up.
options = xmemdup(global_long_options, sizeof global_long_options);
allocated_options = ARRAY_SIZE(global_long_options);
n_options = n_global_long_options;
- for (p = all_commands; p->name; p++) {
+ for (p = get_all_commands(); p->name; p++) {
if (p->options[0]) {
char *save_ptr = NULL;
char *name;
if (shash_is_empty(&commands)) {
const struct vsctl_command_syntax *p;
- for (p = all_commands; p->name; p++) {
+ for (p = get_all_commands(); p->name; p++) {
shash_add_assert(&commands, p->name, p);
}
}
{NULL, 0, 0, NULL, NULL, NULL, NULL, RO},
};
+static const struct vsctl_command_syntax *get_all_commands(void)
+{
+ return all_commands;
+}
.IP "\fBqos/show\fR \fIinterface\fR"
Queries the kernel for Quality of Service configuration and statistics
associated with the given \fIinterface\fR.
+.IP "\fBbfd/show\fR [\fIinterface\fR]"
+Displays detailed information about Bidirectional Forwarding Detection
+configured on \fIinterface\fR. If \fIinterface\fR is not specified,
+then displays detailed information about all interfaces with BFD
+enabled.
+.IP "\fBbfd/set-forwarding\fR [\fIinterface\fR] \fIstatus\fR"
+Forces the fault status of the BFD module on \fIinterface\fR (or all
+interfaces if none is given) to be \fIstatus\fR. \fIstatus\fR can be
+"true", "false", or "normal", which reverts to the standard behavior.
.IP "\fBcfm/show\fR [\fIinterface\fR]"
Displays detailed information about Connectivity Fault Management
configured on \fIinterface\fR. If \fIinterface\fR is not specified,
#include "xenserver.h"
#include <ctype.h>
#include <errno.h>
+#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
VLOG_DEFINE_THIS_MODULE(xenserver);
-static char *
+/* If running on a XenServer, the XenServer host UUID as a 36-character string,
+ * otherwise null. */
+static char *host_uuid;
+
+static void
read_host_uuid(void)
{
static const char filename[] = "/etc/xensource-inventory";
} else {
VLOG_INFO("%s: open: %s", filename, ovs_strerror(errno));
}
- return NULL;
+ return;
}
while (fgets(line, sizeof line, file)) {
if (strlen(line) == leader_len + uuid_len + trailer_len
&& !memcmp(line, leader, leader_len)
&& !memcmp(line + leader_len + uuid_len, trailer, trailer_len)) {
- char *host_uuid = xmemdup0(line + leader_len, uuid_len);
+ host_uuid = xmemdup0(line + leader_len, uuid_len);
VLOG_INFO("running on XenServer, host-uuid %s", host_uuid);
fclose(file);
- return host_uuid;
+ return;
}
}
fclose(file);
VLOG_ERR("%s: INSTALLATION_UUID not found", filename);
- return NULL;
}
const char *
xenserver_get_host_uuid(void)
{
- static char *host_uuid;
- static bool inited;
-
- if (!inited) {
- host_uuid = read_host_uuid();
- inited = true;
- }
+ static pthread_once_t once = PTHREAD_ONCE_INIT;
+ pthread_once(&once, read_host_uuid);
return host_uuid;
}