/*
 * Copyright (c) 2007-2011 Nicira Networks.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include "flow.h"
sizeof(struct icmphdr));
}
-u64 flow_used_time(unsigned long flow_jiffies)
+u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
struct timespec cur_ts;
u64 cur_ms, idle_ms;
key->ip.tos = ipv6_get_dsfield(nh);
key->ip.ttl = nh->hop_limit;
key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
- ipv6_addr_copy(&key->ipv6.addr.src, &nh->saddr);
- ipv6_addr_copy(&key->ipv6.addr.dst, &nh->daddr);
+ key->ipv6.addr.src = nh->saddr;
+ key->ipv6.addr.dst = nh->daddr;
payload_ofs = skip_exthdr(skb, payload_ofs, &nexthdr, &key->ip.frag);
if (unlikely(payload_ofs < 0))
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
-void flow_used(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
u8 tcp_flags = 0;
spin_unlock(&flow->lock);
}
-struct sw_flow_actions *flow_actions_alloc(const struct nlattr *actions)
+struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
{
int actions_len = nla_len(actions);
struct sw_flow_actions *sfa;
return sfa;
}
-struct sw_flow *flow_alloc(void)
+struct sw_flow *ovs_flow_alloc(void)
{
struct sw_flow *flow;
return flow;
}
-static struct hlist_head __rcu *find_bucket(struct flow_table * table, u32 hash)
+static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
return flex_array_get(table->buckets,
(hash & (table->n_buckets - 1)));
}
-static struct flex_array __rcu *alloc_buckets(unsigned int n_buckets)
+static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
- struct flex_array __rcu *buckets;
+ struct flex_array *buckets;
int i, err;
buckets = flex_array_alloc(sizeof(struct hlist_head *),
flex_array_free(buckets);
}
-struct flow_table *flow_tbl_alloc(int new_size)
+struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
/* Marks 'flow' as dead and drops a reference to it via ovs_flow_put().
 * Internal helper; callers outside this file use the public put/deferred
 * variants instead. */
static void flow_free(struct sw_flow *flow)
{
	flow->dead = true;
	ovs_flow_put(flow);
}
-void flow_tbl_destroy(struct flow_table *table)
+void ovs_flow_tbl_destroy(struct flow_table *table)
{
int i;
{
struct flow_table *table = container_of(rcu, struct flow_table, rcu);
- flow_tbl_destroy(table);
+ ovs_flow_tbl_destroy(table);
}
-void flow_tbl_deferred_destroy(struct flow_table *table)
+void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
{
if (!table)
return;
call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
}
-struct sw_flow *flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
+struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
{
struct sw_flow *flow;
struct hlist_head *head;
return NULL;
}
-struct flow_table *flow_tbl_expand(struct flow_table *table)
+struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
struct flow_table *new_table;
int n_buckets = table->n_buckets * 2;
int i;
- new_table = flow_tbl_alloc(n_buckets);
+ new_table = ovs_flow_tbl_alloc(n_buckets);
if (!new_table)
return ERR_PTR(-ENOMEM);
hlist_for_each_entry_safe(flow, n, pos, head, hash_node) {
hlist_del_init_rcu(&flow->hash_node);
- flow_tbl_insert(new_table, flow);
+ ovs_flow_tbl_insert(new_table, flow);
}
}
return new_table;
}
-/* RCU callback used by flow_deferred_free. */
+/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
flow->dead = true;
- flow_put(flow);
+ ovs_flow_put(flow);
}
/* Schedules 'flow' to be freed after the next RCU grace period.
* The caller must hold rcu_read_lock for this to be sensible. */
-void flow_deferred_free(struct sw_flow *flow)
+void ovs_flow_deferred_free(struct sw_flow *flow)
{
call_rcu(&flow->rcu, rcu_free_flow_callback);
}
-void flow_hold(struct sw_flow *flow)
+void ovs_flow_hold(struct sw_flow *flow)
{
atomic_inc(&flow->refcnt);
}
-void flow_put(struct sw_flow *flow)
+void ovs_flow_put(struct sw_flow *flow)
{
if (unlikely(!flow))
return;
}
}
-/* RCU callback used by flow_deferred_free_acts. */
+/* RCU callback used by ovs_flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
struct sw_flow_actions *sf_acts = container_of(rcu,
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
* The caller must hold rcu_read_lock for this to be sensible. */
-void flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
+void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}
}
nd = (struct nd_msg *)skb_transport_header(skb);
- ipv6_addr_copy(&key->ipv6.nd.target, &nd->target);
+ key->ipv6.nd.target = nd->target;
key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
icmp_len -= sizeof(*nd);
}
/**
- * flow_extract - extracts a flow key from an Ethernet frame.
+ * ovs_flow_extract - extracts a flow key from an Ethernet frame.
* @skb: sk_buff that contains the frame, with skb->data pointing to the
* Ethernet header
* @in_port: port number on which @skb was received.
* of a correct length, otherwise the same as skb->network_header.
* For other key->dl_type values it is left untouched.
*/
-int flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
+int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
int *key_lenp)
{
int error = 0;
return error;
}
-u32 flow_hash(const struct sw_flow_key *key, int key_len)
+u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
{
return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), hash_seed);
}
-struct sw_flow *flow_tbl_lookup(struct flow_table *table,
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
struct sw_flow_key *key, int key_len)
{
struct sw_flow *flow;
struct hlist_head *head;
u32 hash;
- hash = flow_hash(key, key_len);
+ hash = ovs_flow_hash(key, key_len);
head = find_bucket(table, hash);
hlist_for_each_entry_rcu(flow, n, head, hash_node) {
return NULL;
}
-void flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
struct hlist_head *head;
table->count++;
}
-void flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
if (!hlist_unhashed(&flow->hash_node)) {
hlist_del_init_rcu(&flow->hash_node);
}
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
-const u32 ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
- [OVS_KEY_ATTR_ENCAP] = 0,
+const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
+ [OVS_KEY_ATTR_ENCAP] = -1,
[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
attrs = 0;
nla_for_each_nested(nla, attr, rem) {
u16 type = nla_type(nla);
+ int expected_len;
- if (type > OVS_KEY_ATTR_MAX || attrs & (1ULL << type) ||
- nla_len(nla) != ovs_key_lens[type])
+ if (type > OVS_KEY_ATTR_MAX || attrs & (1ULL << type))
return -EINVAL;
+
+ expected_len = ovs_key_lens[type];
+ if (nla_len(nla) != expected_len && expected_len != -1)
+ return -EINVAL;
+
attrs |= 1ULL << type;
a[type] = nla;
}
}
/**
- * flow_from_nlattrs - parses Netlink attributes into a flow key.
+ * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
* @swkey: receives the extracted flow key.
* @key_lenp: number of bytes used in @swkey.
* @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
* sequence.
*/
-int flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
const struct nlattr *attr)
{
const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
}
/**
- * flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
+ * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
* @in_port: receives the extracted input port.
* @tun_id: receives the extracted tunnel ID.
* @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
* get the metadata, that is, the parts of the flow key that cannot be
* extracted from the packet itself.
*/
-int flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
- const struct nlattr *attr)
+int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
+ const struct nlattr *attr)
{
const struct nlattr *nla;
int rem;
nla_for_each_nested(nla, attr, rem) {
int type = nla_type(nla);
- if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] != 0) {
+ if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
if (nla_len(nla) != ovs_key_lens[type])
return -EINVAL;
return 0;
}
-int flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
+int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
struct ovs_key_ethernet *eth_key;
struct nlattr *nla, *encap;
/* Initializes the flow module.
* Returns zero if successful or a negative error code. */
-int flow_init(void)
+int ovs_flow_init(void)
{
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
0, NULL);
}
/* Uninitializes the flow module. */
-void flow_exit(void)
+void ovs_flow_exit(void)
{
kmem_cache_destroy(flow_cache);
}